Merge pull request 'Add program base layer' (#1) from memory-bug-investigation into main
Reviewed-on: #1
commit 1c8481cb86
.gitignore (vendored): 2 changes
@@ -3,6 +3,8 @@
##
## Get latest from https://github.com/github/gitignore/blob/master/VisualStudio.gitignore

.stfolder/

# User-specific files
*.suo
*.user
exe_main.cpp: 30 changes
@@ -1,15 +1,19 @@
#include "lib_main.cpp"

#if OS_WINDOWS
#if BUILD_CONSOLE_INTERFACE
int wmain(int argc, WCHAR **argv) {
    Win32_Entry_Point(argc, argv);
    return 0;
}
#else
int wWinMain(HINSTANCE hInstance, HINSTANCE hPrevInstance, LPWSTR lpCmdLine, int nShowCmd) {
    Win32_Entry_Point(__argc, __wargv);
    return 0;
}
#endif
#endif
#include "lib/third_party/dear-imgui/imgui.cpp"
#include "lib/third_party/dear-imgui/imgui_widgets.cpp"
#include "lib/third_party/dear-imgui/imgui_draw.cpp"
#include "lib/third_party/dear-imgui/imgui_tables.cpp"
// #include "lib/third_party/dear-imgui/imgui_demo.cpp"
#include "lib/third_party/dear-imgui/imgui_impl_dx11.cpp"
#include "lib/third_party/dear-imgui/imgui_impl_win32.cpp"

#include "lib/third_party/dear-imgui/imgui.h"
#include "lib/third_party/dear-imgui/imgui_impl_win32.h"
#include "lib/third_party/dear-imgui/imgui_impl_dx11.h"

#pragma comment(lib, "d3d11")
#pragma comment(lib, "d3dcompiler")

#include "src/Base_Entry_Point.cpp"
@@ -2,6 +2,16 @@

#include "Base.h"

#define ALLOCATOR_DEBUG_MODE 1
#define ALLOCATOR_POISON_MEMORY_ON_ALLOCATION \
    (BUILD_DEBUG && ALLOCATOR_DEBUG_MODE)

#if ALLOCATOR_POISON_MEMORY_ON_ALLOCATION
#define ALLOCATOR_INIT_VALUE 0xCD
#else
#define ALLOCATOR_INIT_VALUE 0
#endif

enum class Allocator_Mode: s32 {
    ALLOCATE = 0,
    RESIZE = 1,
@@ -17,7 +27,7 @@ struct Allocator {
    void* data;

    bool operator ! () {
        return (proc == nullptr && data == nullptr);
        return (proc == nullptr);
    }
};

@@ -97,6 +107,10 @@ template <typename T> void zero_struct(T* src) {
    memset(src, 0, sizeof(T));
}

template <typename T> void poison_struct(T* src) {
    memset(src, 0xCD, sizeof(T));
}

template <typename T> T* copy_struct(T* src) {
    T* dst = New<T>(false);
    memcpy(dst, src, sizeof(T));
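The `Allocator` introduced here is the proc-plus-userdata allocator interface: every allocation strategy is one function that switches on `Allocator_Mode`, and `data` points at whatever state that strategy needs. As a point of comparison only, here is a minimal heap-backed proc in the same shape; the names are illustrative and not part of this commit.

// Illustrative only: a malloc-backed proc in the same proc+data style as the
// Allocator struct above. Names here are hypothetical, not repository code.
#include <cstdlib>
#include <cstdint>

enum class Mode : int32_t { ALLOCATE = 0, RESIZE = 1, FREE = 2 };

void* heap_allocator_proc(Mode mode, int64_t requested_size, int64_t /*old_size*/,
                          void* old_memory, void* /*allocator_data*/) {
    switch (mode) {
        case Mode::ALLOCATE: return malloc((size_t)requested_size);
        case Mode::RESIZE:   return realloc(old_memory, (size_t)requested_size);
        case Mode::FREE:     free(old_memory); return nullptr;
    }
    return nullptr;
}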
@ -1,8 +1,6 @@
|
||||
#include "Arena.h"
|
||||
#include "Arena_Windows.cpp"
|
||||
|
||||
constexpr u16 ARENA_DEFAULT_ALIGNMENT = CPU_REGISTER_WIDTH_BYTES;
|
||||
|
||||
// For arrays, use `Array`, which is backed by the general purpose allocator
|
||||
// or use `ArenaArray` if you need to expand the size to an unknown size.
|
||||
void* arena_allocator_proc (Allocator_Mode mode, s64 requested_size, s64 old_size, void* old_memory, void* allocator_data) {
|
||||
@ -13,7 +11,7 @@ void* arena_allocator_proc (Allocator_Mode mode, s64 requested_size, s64 old_siz
|
||||
return arena_alloc(arena, requested_size);
|
||||
} break;
|
||||
case Allocator_Mode::RESIZE: {
|
||||
Assert(false); // DO NOT USE RESIZE WITH ARENAS!
|
||||
Assert(false); // DO NOT USE RESIZE WITH ARENAS! :ArenaResizing
|
||||
// Or maybeeee...
|
||||
// Resize should check if current_point matches the end of the old allocation?
|
||||
// and resize accordingly + pass back same pointer.
|
||||
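The :ArenaResizing comment above sketches the one case where resize is cheap in an arena: when the old block is the most recent allocation, the arena can simply move its current point. A standalone illustration of that idea, using a simplified bump arena rather than the repository's `Arena` type:

// Sketch of resize-in-place for the newest allocation only; alignment and
// page commitment are ignored for brevity. Not repository code.
#include <cstdint>
#include <cstring>

struct BumpArena { uint8_t* base; uint8_t* current; uint8_t* end; };

void* bump_resize(BumpArena* a, void* old_memory, int64_t old_size, int64_t new_size) {
    uint8_t* old_end = (uint8_t*)old_memory + old_size;
    if (old_end == a->current && (uint8_t*)old_memory + new_size <= a->end) {
        a->current = (uint8_t*)old_memory + new_size; // grow (or shrink) in place
        return old_memory;
    }
    if (a->current + new_size > a->end) return nullptr; // out of reserved space
    void* fresh = a->current;                           // otherwise allocate and copy
    a->current += new_size;
    memcpy(fresh, old_memory, (size_t)(old_size < new_size ? old_size : new_size));
    return fresh;
}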
@ -102,12 +100,15 @@ void arena_set_bootstrap_flag (Arena* arena) { arena->flags |= Arena_Flags::Is_B
|
||||
|
||||
void arena_reset_keeping_memory (Arena* arena) {
|
||||
if (!is_valid(arena)) return;
|
||||
|
||||
arena->current_point = arena_start(arena);
|
||||
}
|
||||
|
||||
void arena_reset (Arena* arena) {
|
||||
if (!is_valid(arena)) return;
|
||||
|
||||
arena->current_point = arena_start(arena);
|
||||
|
||||
free_pages_down_to(arena, arena->initial_commit_page_count);
|
||||
}
|
||||
|
||||
@ -116,6 +117,8 @@ void arena_reset_overwriting_memory (Arena* arena, Memory_Wipe_Function wipe_fun
|
||||
Assert(wipe_function != nullptr);
|
||||
if (wipe_function == nullptr) return;
|
||||
wipe_function(arena_start(arena), (u64)(arena->current_point - arena_start(arena)));
|
||||
|
||||
arena_reset(arena);
|
||||
}
|
||||
|
||||
void* arena_alloc (Arena* arena, s64 byte_count) {
|
||||
@ -129,7 +132,7 @@ void* arena_alloc (Arena* arena, s64 byte_count) {
|
||||
|
||||
if (result_end > arena->first_uncommitted_page) {
|
||||
if (result_end > arena_address_limit(arena)) {
|
||||
// #TODO: Log error here:
|
||||
printf("[Error] Failed to allocate because Arena is full and cannot expand!\n");
|
||||
Assert(false); // Failed to allocate because arena is full and cannot expand!
|
||||
} else {
|
||||
extend_committed_pages(arena, result_end);
|
||||
@ -174,10 +177,30 @@ s64 reserve_size (Arena_Reserve ar) {
|
||||
return 0;
|
||||
}
|
||||
|
||||
constexpr s64 Arena_Sizes[6] = {
|
||||
64LL * 1024,
|
||||
2LL * 1024 * 1024,
|
||||
64LL * 1024 * 1024,
|
||||
2LL * 1024 * 1024 * 1024,
|
||||
64LL * 1024 * 1024 * 1024,
|
||||
2LL * 1024 * 1024 * 1024 * 1024,
|
||||
};
|
||||
|
||||
Arena_Reserve next_reserve_size (s64 size) {
|
||||
for (u8 i = 0; i < 6; i += 1) {
|
||||
if (size <= Arena_Sizes[i]) {
|
||||
return (Arena_Reserve)i;
|
||||
}
|
||||
}
|
||||
|
||||
return Arena_Reserve::Size_64T;
|
||||
}
|
||||
|
||||
// arena_usage_bytes is kinda pointless tbh.
|
||||
s64 arena_usage_bytes (Arena* arena) { return (s64)(arena->current_point - arena_start(arena)); }
|
||||
s64 arena_usage_committed_bytes (Arena* arena) { return (s64)(arena->first_uncommitted_page - arena->memory_base); }
|
||||
|
||||
// for arena details, I need to setup my string builder first.
|
||||
Allocator get_allocator(Arena* arena) {
|
||||
Allocator get_allocator (Arena* arena) {
|
||||
return { arena_allocator_proc, arena };
|
||||
}
|
||||
|
||||
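Each entry in `Arena_Sizes` is 32 times the previous one (64 KB, 2 MB, 64 MB, 2 GB, 64 GB, 2 TB), so `next_reserve_size` is a first-fit scan over six tiers. A standalone restatement of that lookup, for illustration only:

// Standalone restatement of the tier lookup above; not repository code.
#include <cstdint>

constexpr int64_t kTiers[6] = {
    64LL << 10, 2LL << 20, 64LL << 20, 2LL << 30, 64LL << 30, 2LL << 40,
};

int tier_for(int64_t size) {
    for (int i = 0; i < 6; ++i)
        if (size <= kTiers[i]) return i;
    return 5; // anything larger falls into the largest class
}
// tier_for(50 * 1024)         == 0  (the 64 KB reserve class)
// tier_for(3LL * 1024 * 1024) == 2  (2 MB is too small, so the 64 MB class)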
@ -1,10 +1,14 @@
|
||||
#pragma once
|
||||
|
||||
struct ExpandableArena; // fwd declare #temp
|
||||
|
||||
#if OS_WINDOWS
|
||||
constexpr u32 ARENA_DEFAULT_COMMIT_PAGE_COUNT = 16; // 16 * 4k page = 64kB
|
||||
constexpr s64 ARENA_DEFAULT_COMMIT_SIZE_BYTES = 65536;
|
||||
#endif
|
||||
|
||||
constexpr u16 ARENA_DEFAULT_ALIGNMENT = CPU_REGISTER_WIDTH_BYTES;
|
||||
|
||||
#define ARENA_DEBUG BUILD_DEBUG
|
||||
|
||||
enum class Arena_Reserve: u8 {
|
||||
@ -57,7 +61,19 @@ typedef void* (*Memory_Wipe_Function)(void* memory, u64 byte_count);
|
||||
void* arena_allocator_proc (Allocator_Mode mode, s64 requested_size, s64 old_size, void* old_memory, void* allocator_data);
|
||||
|
||||
// Interface API for normal use (idk how to explain - see Arena_Table.cpp)
|
||||
void initialize_arena_table ();
|
||||
#include <mutex> // #TODO: Replace with Mutex (see OS_Win32.cpp)
|
||||
struct Arena_Table {
|
||||
std::mutex mutex;
|
||||
s32 in_flight_count[6];
|
||||
Array<Arena*> free_table[6];
|
||||
|
||||
#if ARENA_DEBUG
|
||||
Array<Arena*> in_flight[6];
|
||||
#endif
|
||||
b32 initialized;
|
||||
};
|
||||
|
||||
void initialize_arena_table (Allocator allocator);
|
||||
Arena* next_arena (Arena_Reserve reserve_size);
|
||||
void release_arena (Arena* arena, bool delete_extra_pages=true);
|
||||
|
||||
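The table above is effectively a pool: `next_arena` checks an arena out of `free_table` for the requested tier (bootstrapping a fresh one only when the tier is empty), and `release_arena` resets it and puts it back. A hedged usage sketch, assuming the declarations in this header:

// Usage sketch only; assumes initialize_arena_table() has already run.
Arena* scratch = next_arena(Arena_Reserve::Size_64M); // check out a pooled arena
void* block = arena_alloc(scratch, 4096);
// ... fill `block` ...
release_arena(scratch, /*delete_extra_pages=*/true);  // reset and return to free_table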
@ -71,6 +87,8 @@ void arena_clear_flags (Arena* arena);
|
||||
void arena_set_bootstrap_flag (Arena* arena);
|
||||
void arena_set_secure_flag (Arena* arena);
|
||||
|
||||
bool arena_is_bootstrapped (Arena* arena);
|
||||
|
||||
void arena_reset_keeping_memory (Arena* arena); // just sets current point to arena_start
|
||||
void arena_reset (Arena* arena); // frees excess pages
|
||||
void arena_reset_overwriting_memory (Arena* arena, Memory_Wipe_Function wipe_function);
|
||||
@ -87,7 +105,7 @@ s64 arena_usage_committed_bytes (Arena* arena);
|
||||
s64 reserve_size (Arena* arena);
|
||||
s64 reserve_size (Arena_Reserve ar);
|
||||
bool is_valid (Arena* arena);
|
||||
Allocator get_allocator(Arena* arena);
|
||||
Allocator get_allocator (Arena* arena);
|
||||
|
||||
// Platform-Specific Implementations (forward-declared)
|
||||
void platform_init (Arena* arena, s64 new_reserve);
|
||||
@ -95,14 +113,20 @@ void extend_committed_pages (Arena* arena, u8* end);
|
||||
void free_pages_down_to (Arena* arena, s64 pages_to_keep);
|
||||
void arena_delete (Arena* arena);
|
||||
|
||||
Arena_Reserve next_reserve_size (s64 size);
|
||||
|
||||
struct Auto_Reset {
|
||||
Arena* arena;
|
||||
u8* starting_point;
|
||||
|
||||
Auto_Reset(Arena* arena) {
|
||||
Assert(is_valid(arena));
|
||||
this->arena = arena;
|
||||
this->starting_point = arena->current_point;
|
||||
Assert(is_valid(arena));
|
||||
}
|
||||
|
||||
Auto_Reset(ExpandableArena* arena_ex) {
|
||||
Auto_Reset((Arena*)arena_ex);
|
||||
}
|
||||
|
||||
~Auto_Reset() {
|
||||
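`Auto_Reset` is a scope guard: it records `current_point` on construction and, presumably, rewinds the arena to that point in the destructor (the destructor body falls outside this hunk). The `auto_reset(x)` macro defined in Base.h further down gives the guard a line-unique name. A hedged usage sketch:

// Usage sketch only; assumes the Auto_Reset destructor rewinds to starting_point.
void build_temporary_report(Arena* arena) {
    auto_reset(arena);                           // snapshot arena->current_point
    char* buffer = (char*)arena_alloc(arena, 64 * 1024);
    // ... fill and use buffer within this scope ...
}                                                // guard restores the snapshot here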
@ -137,6 +161,10 @@ struct Push_Alignment { // #rename to Arena_Push_Alignment?
|
||||
this->arena->alignment = alignment;
|
||||
}
|
||||
|
||||
Push_Alignment(ExpandableArena* arena_ex, u16 alignment) {
|
||||
Push_Alignment((Arena*)arena_ex, alignment);
|
||||
}
|
||||
|
||||
~Push_Alignment() {
|
||||
arena->alignment = original_alignment;
|
||||
}
|
||||
|
||||

@ -8,9 +8,11 @@ struct ArenaArray { // downcasts to an ArrayView.
|
||||
s64 count;
|
||||
T* data;
|
||||
s64 allocated;
|
||||
Arena* arena; // We can probably assume arena is &Array-32
|
||||
Arena* arena;
|
||||
|
||||
ArenaArray() {}
|
||||
ArenaArray() {
|
||||
memset(this, 0, sizeof(*this));
|
||||
}
|
||||
|
||||
T& operator[] (s64 index) {
|
||||
#if ARRAY_ENABLE_BOUNDS_CHECKING
|
||||
@ -23,7 +25,7 @@ struct ArenaArray { // downcasts to an ArrayView.
|
||||
|
||||
// #NOTE: I am not defining arena_array_init (ArenaArray<T>*), because I do not want to
|
||||
// encourage its usage!
|
||||
// #TODO: array_free vs arena_array_destroy or arena_array_delete or
|
||||
// Use arena_array_free to reset
|
||||
template <typename T>
|
||||
ArenaArray<T>* arena_array_new (s64 preallocate_count, Arena_Reserve reserve_size) {
|
||||
Arena* arena = next_arena(reserve_size);
|
||||
@ -37,15 +39,15 @@ ArenaArray<T>* arena_array_new (s64 preallocate_count, Arena_Reserve reserve_siz
|
||||
array->allocated = preallocate_count;
|
||||
}
|
||||
|
||||
array.count = 0;
|
||||
array.arena = new_arena;
|
||||
array.data = array_start(array);
|
||||
array->count = 0;
|
||||
array->arena = arena;
|
||||
array->data = array_start<T>(*array);
|
||||
|
||||
return array;
|
||||
}
|
||||
|
||||
template <typename T> T* array_start (ArenaArray<T>& array) {
|
||||
return (array->arena->memory_base + ARRAY_ARENA_START_OFFSET);
|
||||
return (array.arena->memory_base + ARRAY_ARENA_START_OFFSET);
|
||||
}
|
||||
|
||||
template <typename T> bool is_empty (ArenaArray<T>& array) {
|
||||
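`ArenaArray` couples an array header with a dedicated arena: `arena_array_new` checks an arena out of the table, keeps the header at the start of that arena (the elements begin at `ARRAY_ARENA_START_OFFSET`), and grows by committing more pages. A hedged usage sketch based on the functions above:

// Usage sketch only; relies on the ArenaArray API declared in this file.
ArenaArray<f64>* samples = arena_array_new<f64>(/*preallocate_count=*/1024,
                                                Arena_Reserve::Size_2M);
array_add(*samples, 3.14);
array_add(*samples, 2.71);
// ... the backing arena commits pages as the array grows ...
arena_array_free(*samples);   // releases the whole arena back to the table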
@ -57,11 +59,11 @@ template <typename T> s64 memory_usage (ArenaArray<T>& array) {
|
||||
return arena_usage_committed_bytes(array.arena);
|
||||
}
|
||||
|
||||
template <typename T> void array_free (ArenaArray<T>& array) {
|
||||
array.count = 0;
|
||||
array.allocated = 0;
|
||||
|
||||
release_arena(array.arena, delete_extra_pages=true);
|
||||
template <typename T> void arena_array_free (ArenaArray<T>& array) {
|
||||
release_arena(array.arena, true);
|
||||
#if BUILD_DEBUG
|
||||
poison_struct(&array);
|
||||
#endif
|
||||
}
|
||||
|
||||
template <typename T> ArrayView<T> array_view (ArenaArray<T> array) {
|
||||
@ -97,6 +99,19 @@ template <typename T> ArrayView<T> to_view (ArenaArray<T>& array, s64 start_offs
|
||||
return av;
|
||||
}
|
||||
|
||||
template <typename T> void array_add (ArenaArray<T>& array, ArrayView<T> items) {
|
||||
T* current_point = &array.data[array.count];
|
||||
s64 final_count = array.allocated + items.count;
|
||||
|
||||
if (array.allocated < final_count) {
|
||||
array_reserve(array, final_count);
|
||||
}
|
||||
|
||||
memcpy(current_point, items.data, items.count * sizeof(T));
|
||||
|
||||
array.count += items.count;
|
||||
}
|
||||
|
||||
template <typename T> void array_add (ArenaArray<T>& array, T item) {
|
||||
maybe_grow(array);
|
||||
array.data[array.count] = item;
|
||||
@ -149,7 +164,7 @@ void array_arena_realloc (ArenaArray<u8>& array, s64 new_size, s64 old_size) {
|
||||
if (result_end > array.arena->first_uncommitted_page) {
|
||||
// Critical error if we run out of address space!
|
||||
if (result_end > arena_address_limit(array.arena)) {
|
||||
// #TODO Log error.
|
||||
printf("[Error] Failed to allocate because Arena is full and cannot expand!\n");
|
||||
Assert(false); // Failed to allocate because Arena is full and cannot expand
|
||||
return;
|
||||
}
|
||||
@ -173,6 +188,14 @@ template <typename T> void init_range (T* ptr, s64 start_offset, s64 end_offset)
|
||||
}
|
||||
}
|
||||
|
||||
template <typename T> void poison_range (ArenaArray<T>& array, s64 start, s64 count) {
|
||||
Assert(start >= 0 && start < array.count);
|
||||
Assert(start + count <= array.count);
|
||||
// Check that these ranges make sense
|
||||
T* start_address = &array[start];
|
||||
memset(start_address, 0xCD, count * sizeof(T));
|
||||
}
|
||||
|
||||
template <typename T> force_inline void array_reset (ArenaArray<T>& array) {
|
||||
// reset backing array:
|
||||
arena_reset(array.arena);
|
||||
|
||||
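`poison_range` and `poison_struct` fill dead memory with 0xCD so stale reads stand out in a debugger instead of silently reading zeroes. A tiny standalone illustration of the same idea:

// Standalone illustration of the 0xCD poisoning pattern; not repository code.
#include <cstring>
#include <cstdint>

struct Widget { int32_t id; float weight; };

int main() {
    Widget w;
    memset(&w, 0xCD, sizeof(w));
    // w.id now shows up as 0xCDCDCDCD in a hex view: an unmistakable
    // "this was never written" marker.
    return 0;
}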
@ -1,59 +1,87 @@
|
||||
// #TODO: #Arena_Table #garbage_collection in `release_arena`
|
||||
// [ ] Garbage collection if we have >> 64 in a particular table for a while.
|
||||
// There should be some parameters regarding what the upper limit for idle
|
||||
// committed pages should be and a heuristic for maximum number of arenas waiting
|
||||
|
||||
// API in Arena.h
|
||||
#include <mutex>
|
||||
|
||||
global std::mutex arena_table_mutex;
|
||||
global s32 arenas_in_flight_count[6];
|
||||
global Array<Arena*> arena_free_table[6];
|
||||
#if BUILD_DEBUG
|
||||
global Array<Arena*> arenas_in_flight[6];
|
||||
#endif
|
||||
global Arena_Table* arena_table;
|
||||
|
||||
void initialize_arena_table () {
|
||||
// global b64 arena_tables_initialized = false;
|
||||
// global std::mutex arena_table_mutex;
|
||||
// global s32 arenas_in_flight_count[6];
|
||||
// global Array<Arena*> arena_free_table[6];
|
||||
// #if BUILD_DEBUG
|
||||
// global Array<Arena*> arenas_in_flight[6];
|
||||
// #endif
|
||||
|
||||
// Only call once from main thread!
|
||||
void initialize_arena_table (Allocator allocator) {
|
||||
Assert(arena_table != nullptr);
|
||||
if (arena_table->initialized)
|
||||
return;
|
||||
|
||||
for (s32 i = 0; i < 6; i += 1) {
|
||||
arena_free_table[i].allocator = GPAllocator();
|
||||
arenas_in_flight[i].allocator = GPAllocator();
|
||||
array_reserve(arena_free_table[i], 64);
|
||||
array_reserve(arenas_in_flight[i], 64);
|
||||
arena_table->in_flight_count[i] = 0;
|
||||
arena_table->free_table[i].allocator = allocator;
|
||||
array_reserve(arena_table->free_table[i], 64);
|
||||
#if ARENA_DEBUG
|
||||
arena_table->in_flight[i].allocator = allocator;
|
||||
array_reserve(arena_table->in_flight[i], 64);
|
||||
#endif
|
||||
}
|
||||
|
||||
arena_table->initialized = true;
|
||||
}
|
||||
|
||||
Arena* next_arena (Arena_Reserve reserve_size) {
|
||||
Assert(arena_table != nullptr);
|
||||
Arena* arena;
|
||||
std::lock_guard<std::mutex> lock(arena_table_mutex);
|
||||
std::lock_guard<std::mutex> lock(arena_table->mutex);
|
||||
s64 reserve_index = (s64)reserve_size;
|
||||
arenas_in_flight_count[reserve_index] += 1;
|
||||
if (!arena_free_table[reserve_index].count) {
|
||||
|
||||
if (!arena_table->free_table[reserve_index].count) {
|
||||
arena = bootstrap_arena(reserve_size, ARENA_DEFAULT_COMMIT_PAGE_COUNT);
|
||||
} else {
|
||||
arena = pop(arena_free_table[reserve_index]);
|
||||
arena = pop(arena_table->free_table[reserve_index]);
|
||||
}
|
||||
#if ARENA_DEBUG
|
||||
array_add(arenas_in_flight[reserve_index], arena);
|
||||
array_add(arena_table->in_flight[reserve_index], arena);
|
||||
#endif
|
||||
|
||||
arena_table->in_flight_count[reserve_index] += 1;
|
||||
|
||||
Assert(arena != nullptr);
|
||||
|
||||
return arena;
|
||||
}
|
||||
|
||||
void release_arena (Arena* arena, bool delete_extra_pages) {
|
||||
std::lock_guard<std::mutex> lock(arena_table_mutex);
|
||||
Assert(arena_table != nullptr);
|
||||
Assert(arena != nullptr);
|
||||
Assert(arena_is_bootstrapped(arena));
|
||||
// Only put into free table if arena is bootstrapped?
|
||||
std::lock_guard<std::mutex> lock(arena_table->mutex);
|
||||
s64 reserve_index = (s64)arena->reserve_size;
|
||||
|
||||
#if ARENA_DEBUG
|
||||
array_unordered_remove_by_value(arenas_in_flight[reserve_index], arena, 1);
|
||||
array_unordered_remove_by_value(arena_table->in_flight[reserve_index], arena, 1); // BUILD_DEBUG!
|
||||
#endif
|
||||
arena_reset_keeping_memory(arena);
|
||||
if (delete_extra_pages) {
|
||||
free_pages_down_to(arena, arena->initial_commit_page_count);
|
||||
}
|
||||
array_add(arena_free_table[reserve_index], arena);
|
||||
array_add(arena_table->free_table[reserve_index], arena);
|
||||
|
||||
arenas_in_flight_count[reserve_index] -= 1;
|
||||
|
||||
// #TODO: Garbage collection if we have >> 64 in a particular table for a while.
|
||||
//
|
||||
arena_table->in_flight_count[reserve_index] -= 1;
|
||||
/*
|
||||
// #TODO #garbage_collection
|
||||
if (arena_free_table[reserve_index].count > 64) {
|
||||
// release some arenas if required
|
||||
// arena_delete(...)
|
||||
// s64 arenas_to_delete_count = arena_free_table[reserve_index].count - 64.
|
||||
// while (arenas_to_delete_count > 0) {
|
||||
// arena_delete(arena_free_table[arena_free_table.count-1]);
|
||||
// array_unordered_remove_by_index(..);
|
||||
// arenas_to_delete_count -= 1;
|
||||
// }
|
||||
}
|
||||
*/
|
||||
}
|
||||
|
||||
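The commented-out block above describes the intended garbage-collection heuristic: once a tier's free list holds well over 64 idle arenas, delete the excess. Below is a minimal sketch of that heuristic, assuming the `Array` helpers and `arena_delete` shown elsewhere in this diff; it is not part of the commit, and a real caller would need to hold `arena_table->mutex`.

// Hypothetical helper sketching the #garbage_collection TODO above.
void trim_free_table(s64 reserve_index, s64 keep_at_most = 64) {
    auto& table = arena_table->free_table[reserve_index];
    while (table.count > keep_at_most) {
        Arena* extra = pop(table);  // drop the most recently released arena
        arena_delete(extra);        // hand its address space back to the OS
    }
}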
@ -8,11 +8,12 @@ void platform_init (Arena* arena, s64 new_reserve) {
|
||||
void* address_start =
|
||||
VirtualAlloc(nullptr, (u64)page_aligned_reserve_size, MEM_RESERVE, PAGE_READWRITE);
|
||||
|
||||
// printf("Reserving range %p:%p (size: %llu)\n", address_start, (u8*)address_start + page_aligned_reserve_size, page_aligned_reserve_size);
|
||||
|
||||
if (address_start == nullptr) {
|
||||
// get error value and string?
|
||||
s32 error_code = GetLastError();
|
||||
printf("In Arena:platform_init, VirtualAlloc failed with code %d\n", error_code);
|
||||
return;
|
||||
// #TODO(LOG) log_error("In Arena:platform_init, VirtualAlloc failed with code %d\n", error_code)
|
||||
}
|
||||
|
||||
arena->memory_base = (u8*)address_start;
|
||||
@ -25,6 +26,7 @@ void extend_committed_pages (Arena* arena, u8* end) {
|
||||
s64 size = Align_To_Page_Size(delta);
|
||||
|
||||
VirtualAlloc(arena->first_uncommitted_page, (u64)size, MEM_COMMIT, PAGE_READWRITE);
|
||||
// printf("Committing range %p:%p (size: %llu)\n", arena->first_uncommitted_page, (u8*)arena->first_uncommitted_page + size, size);
|
||||
|
||||
// arena_lock_pages(arena, arena->first_uncommitted_page, size);
|
||||
arena->first_uncommitted_page += size;
|
||||
@ -32,9 +34,9 @@ void extend_committed_pages (Arena* arena, u8* end) {
|
||||
|
||||
void free_pages_down_to (Arena* arena, s64 pages_to_keep) {
|
||||
if (arena == nullptr) return;
|
||||
Assert(pages_to_keep >= 0);
|
||||
Assert(pages_to_keep >= 1); // Always keep one page because we bootstrap a lot.
|
||||
s64 bytes_to_keep = pages_to_keep * PLATFORM_MEMORY_PAGE_SIZE;
|
||||
if (bytes_to_keep >= reserve_size(arena)) {
|
||||
if (bytes_to_keep > reserve_size(arena)) {
|
||||
Assert(false); // Break in debug builds, but release we just do nothing.
|
||||
return; // just do nothing here. Maybe we should assert?
|
||||
}
|
||||
@ -48,7 +50,10 @@ void free_pages_down_to (Arena* arena, s64 pages_to_keep) {
|
||||
u64 bytes_to_decommit = (u64)(arena->first_uncommitted_page - start_address);
|
||||
// arena_unlock_pages(arena, start_address, (s64)bytes_to_decommit);
|
||||
|
||||
VirtualFree(start_address, bytes_to_decommit, MEM_DECOMMIT);
|
||||
if (bytes_to_decommit) {
|
||||
VirtualFree(start_address, bytes_to_decommit, MEM_DECOMMIT);
|
||||
// printf("Freeing pages %p:%p (size: %llu)\n", start_address, (u8*)start_address + bytes_to_decommit, bytes_to_decommit);
|
||||
}
|
||||
|
||||
arena->first_uncommitted_page = start_address;
|
||||
}
|
||||
@ -59,6 +64,9 @@ void arena_delete (Arena* arena) {
|
||||
|
||||
VirtualFree(arena->memory_base, 0, MEM_RELEASE);
|
||||
|
||||
s64 size_tmp = reserve_size(arena);
|
||||
// printf("Releasing range %p:%p (size: %llu)\n", arena->memory_base, (u8*)arena->memory_base + size_tmp, size_tmp);
|
||||
|
||||
if (!arena_was_boostrapped) {
|
||||
arena->memory_base = nullptr;
|
||||
}
|
||||
|
||||
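All of the Windows-side functions above are built on the same VirtualAlloc lifecycle: reserve address space once, commit pages as the arena grows, decommit on reset, release on delete. A standalone sketch of that lifecycle, illustrative only:

// Standalone Win32 sketch of reserve -> commit -> decommit -> release.
#define WIN32_LEAN_AND_MEAN
#include <Windows.h>

int main() {
    SIZE_T reserve = 64 * 1024 * 1024;   // claim address space only, no RAM yet
    void* base = VirtualAlloc(nullptr, reserve, MEM_RESERVE, PAGE_READWRITE);
    if (base == nullptr) return 1;

    SIZE_T commit = 16 * 4096;           // back the first 16 pages with memory
    VirtualAlloc(base, commit, MEM_COMMIT, PAGE_READWRITE);

    VirtualFree(base, commit, MEM_DECOMMIT); // drop the pages, keep the range
    VirtualFree(base, 0, MEM_RELEASE);       // give the address space back
    return 0;
}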
lib/Base/Array.h: 129 changes
@ -1,14 +1,7 @@
|
||||
// Strongly influenced by Array.jai in Basic module.
|
||||
#pragma once
|
||||
|
||||
// #TODO: Array.h
|
||||
// [x] Set allocations to use context.allocator interface
|
||||
// For now, I'm just disabling alignment:
|
||||
// [ ] Add back alignment, and make sure there's a way to pass alignment to NewArray, which gets passed to allocator.proc.
|
||||
// [ ] Make versions of ArrayView initializer that takes allocator as a param
|
||||
// [ ] Make version of array_free (ArrayView&) that takes allocator as a param
|
||||
// For Arena-Backed arrays use ArenaArray
|
||||
// #define DEFAULT_ARRAY_ALIGNMENT 16
|
||||
|
||||
MSVC_RUNTIME_CHECKS_OFF
|
||||
|
||||
@ -19,7 +12,6 @@ struct Array { // downcasts to an ArrayView.
|
||||
T* data;
|
||||
s64 allocated;
|
||||
Allocator allocator;
|
||||
// s64 alignment = DEFAULT_ARRAY_ALIGNMENT;
|
||||
|
||||
Array() {
|
||||
memset(this, 0, sizeof(*this));
|
||||
@ -32,15 +24,6 @@ struct Array { // downcasts to an ArrayView.
|
||||
allocated = new_count;
|
||||
}
|
||||
|
||||
// initializer-list type instantiation: `Array<T> new_array = {count, data}`
|
||||
// (Musa) This array cannot then be resized. Why do I even have this? Do I need it?
|
||||
// Array(s64 new_count, void* new_data) {
|
||||
// count = new_count;
|
||||
// data = (T*)new_data;
|
||||
// allocator = { nullptr, nullptr }; // NOT RESIZABLE.
|
||||
// allocated = new_count;
|
||||
// }
|
||||
|
||||
// Used by array_zero, array_copy, etc.
|
||||
Array(s64 new_count, void* new_data, s64 _allocated) {
|
||||
count = new_count;
|
||||
@ -70,7 +53,7 @@ template <typename T> bool is_resizable (Array<T>& src) {
|
||||
}
|
||||
|
||||
template <typename T>
|
||||
bool is_valid(Array<T> src) {
|
||||
bool is_valid (Array<T> src) {
|
||||
if (src.count == 0) return true;
|
||||
if (src.count < 0) return false;
|
||||
if (src.data == nullptr) return false;
|
||||
@ -79,7 +62,12 @@ bool is_valid(Array<T> src) {
|
||||
}
|
||||
|
||||
template <typename T>
|
||||
Array<T> array_copy_zero(const Array<T>& src) {
|
||||
void array_zero (const Array<T>& src) {
|
||||
memset(src.data, 0, src.count * sizeof(T));
|
||||
}
|
||||
|
||||
template <typename T>
|
||||
Array<T> array_copy_zero (const Array<T>& src) {
|
||||
if (!src.data || src.count == 0) {
|
||||
return Array<T>(); // Return an empty array
|
||||
}
|
||||
@ -91,7 +79,7 @@ Array<T> array_copy_zero(const Array<T>& src) {
|
||||
}
|
||||
|
||||
template <typename T>
|
||||
Array<T> array_copy(const Array<T>& src) {
|
||||
Array<T> array_copy (const Array<T>& src) {
|
||||
if (!src.data || src.count == 0) {
|
||||
return Array<T>(); // Return an empty array
|
||||
}
|
||||
@ -103,12 +91,12 @@ Array<T> array_copy(const Array<T>& src) {
|
||||
}
|
||||
|
||||
template <typename T>
|
||||
void array_reset_keeping_memory(Array<T>& src) {
|
||||
void array_reset_keeping_memory (Array<T>& src) {
|
||||
src.count = 0;
|
||||
}
|
||||
|
||||
template <typename T>
|
||||
void array_free(Array<T>& src) {
|
||||
void array_free (Array<T>& src) {
|
||||
if (!src.data) return;
|
||||
if (src.allocated == 0) return;
|
||||
if (src.allocator.proc != nullptr) {
|
||||
@ -122,7 +110,7 @@ void array_free(Array<T>& src) {
|
||||
}
|
||||
|
||||
template <typename T>
|
||||
void array_initialize(Array<T>& src, s64 start, s64 end) {
|
||||
void array_initialize (Array<T>& src, s64 start, s64 end) {
|
||||
for (s64 i = start; i < end; i += 1) {
|
||||
// Really this can be one init followed by a bunch of memcpy.
|
||||
// For long arrays we could power-of-two double the copy out, etc.
|
||||
@ -131,7 +119,7 @@ void array_initialize(Array<T>& src, s64 start, s64 end) {
|
||||
}
|
||||
|
||||
template <typename T>
|
||||
void array_reserve(Array<T>& src, s64 desired_items) {
|
||||
void array_reserve (Array<T>& src, s64 desired_items) {
|
||||
if (desired_items <= src.allocated) return;
|
||||
|
||||
src.data = nullptr;
|
||||
@ -149,7 +137,7 @@ void array_reserve(Array<T>& src, s64 desired_items) {
|
||||
}
|
||||
|
||||
template <typename T>
|
||||
void array_resize(Array<T>& src, s64 new_count, bool initialize=true) {
|
||||
void array_resize (Array<T>& src, s64 new_count, bool initialize=true) {
|
||||
if (src.count == new_count) return;
|
||||
|
||||
s64 old_count = src.count;
|
||||
@ -161,7 +149,7 @@ void array_resize(Array<T>& src, s64 new_count, bool initialize=true) {
|
||||
}
|
||||
|
||||
template <typename T>
|
||||
force_inline void array_maybe_grow(Array<T>& src) {
|
||||
force_inline void array_maybe_grow (Array<T>& src) {
|
||||
if (src.count >= src.allocated) {
|
||||
// Replace with Basic.max(8, 2 * src.count).
|
||||
s64 reserve = 8;
|
||||
@ -171,35 +159,37 @@ force_inline void array_maybe_grow(Array<T>& src) {
|
||||
}
|
||||
|
||||
template <typename T>
|
||||
T pop(Array<T>& src) {
|
||||
T pop (Array<T>& src) {
|
||||
auto result = src[src.count-1];
|
||||
src.count -= 1;
|
||||
return result;
|
||||
}
|
||||
|
||||
template <typename T, typename U>
|
||||
void array_add(Array<T>& src, U new_item) {
|
||||
static_assert(sizeof(U) <= sizeof(T));
|
||||
auto new_count = src.count + 1;
|
||||
// template <typename T, typename U>
|
||||
// void array_add (Array<T>& src, U new_item) {
|
||||
// static_assert(sizeof(U) <= sizeof(T));
|
||||
// auto new_count = src.count + 1;
|
||||
// array_maybe_grow(src);
|
||||
|
||||
// T new_item_casted = (T)new_item;
|
||||
|
||||
// src.count += 1;
|
||||
// memcpy(&src[src.count-1], &new_item_casted, sizeof(T));
|
||||
// }
|
||||
|
||||
template <typename T>
|
||||
void array_add (Array<T>& src, T new_item) {
|
||||
array_maybe_grow(src);
|
||||
|
||||
T new_item_casted = (T)new_item;
|
||||
src.data[src.count] = new_item;
|
||||
|
||||
src.count += 1;
|
||||
memcpy(&src[src.count-1], &new_item_casted, sizeof(T));
|
||||
// auto dst_ptr = &src.data[src.count-1];
|
||||
// memcpy(dst_ptr, &new_item, sizeof(T));
|
||||
}
|
||||
|
||||
template <typename T>
|
||||
void array_add(Array<T>& src, T new_item) {
|
||||
auto new_count = src.count + 1;
|
||||
array_maybe_grow(src);
|
||||
|
||||
src.count += 1;
|
||||
memcpy(&src[src.count-1], &new_item, sizeof(T));
|
||||
}
|
||||
|
||||
template <typename T>
|
||||
s64 array_find(Array<T>& src, T item) {
|
||||
s64 array_find (Array<T>& src, T item) {
|
||||
ForArray(i, src) {
|
||||
if (src[i] == item) return i;
|
||||
}
|
||||
@ -207,7 +197,7 @@ s64 array_find(Array<T>& src, T item) {
|
||||
}
|
||||
|
||||
template <typename T>
|
||||
void array_ordered_remove_by_index(Array<T>& src, s64 index) {
|
||||
void array_ordered_remove_by_index (Array<T>& src, s64 index) {
|
||||
Assert(index >= 0); Assert(index < src.count);
|
||||
|
||||
for (s64 i = index; i < src.count-1; i += 1) {
|
||||
@ -218,13 +208,13 @@ void array_ordered_remove_by_index(Array<T>& src, s64 index) {
|
||||
}
|
||||
|
||||
template <typename T>
|
||||
void array_ordered_remove_by_value(Array<T>& src, T item) {
|
||||
void array_ordered_remove_by_value (Array<T>& src, T item) {
|
||||
auto index = array_find(src, item);
|
||||
if (index != -1) { array_ordered_remove_by_index(src, index); }
|
||||
}
|
||||
|
||||
template <typename T>
|
||||
void array_unordered_remove_by_index(Array<T>& src, s64 index) {
|
||||
void array_unordered_remove_by_index (Array<T>& src, s64 index) {
|
||||
Assert(index >= 0); Assert(index < src.count);
|
||||
|
||||
auto last_index = src.count - 1;
|
||||
@ -237,15 +227,15 @@ void array_unordered_remove_by_index(Array<T>& src, s64 index) {
|
||||
}
|
||||
|
||||
template <typename T>
|
||||
s64 array_unordered_remove_by_value(Array<T>& src, T item, s64 max_count_to_remove) {
|
||||
s64 array_unordered_remove_by_value (Array<T>& src, T item, s64 max_count_to_remove) {
|
||||
s64 removed_count = 0;
|
||||
|
||||
for (s64 i = 0; i < src.count; i += 1) {
|
||||
if (src[i] == item) {
|
||||
removed_count += 1;
|
||||
array_unordered_remove_by_index(src, i);
|
||||
debug_break(); // haven't quite figured this one out yet.
|
||||
i -= 1; // check this element again??
|
||||
|
||||
i -= 1; // check this element index again
|
||||
if (max_count_to_remove == removed_count) { break; }
|
||||
}
|
||||
}
|
||||
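`Array<T>` is the general-purpose growable array: it remembers the `Allocator` it was created with, grows through `array_maybe_grow`, and offers both ordered and unordered removal. A hedged usage sketch, assuming a context allocator is already installed:

// Usage sketch only; relies on the Array<T> functions defined in this file.
Array<s64> ids;                         // default constructor zeroes the struct
array_reserve(ids, 64);                 // allocate capacity up front
array_add(ids, (s64)42);
array_add(ids, (s64)7);
s64 where = array_find(ids, (s64)7);    // -> 1, or -1 if missing
if (where != -1) array_unordered_remove_by_index(ids, where);
array_free(ids);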
@ -259,6 +249,11 @@ struct ArrayView {
|
||||
s64 count;
|
||||
T* data;
|
||||
|
||||
ArrayView(Array<T> array) {
|
||||
count = array.count;
|
||||
data = array.data;
|
||||
}
|
||||
|
||||
ArrayView() { count = 0; data = nullptr; }
|
||||
|
||||
ArrayView(s64 new_count, bool initialize=true) {
|
||||
@ -281,7 +276,7 @@ struct ArrayView {
|
||||
};
|
||||
|
||||
template <typename T>
|
||||
bool is_zero(ArrayView<T> src) {
|
||||
bool is_empty (ArrayView<T> src) {
|
||||
if (src.count == 0) return true;
|
||||
return false;
|
||||
}
|
||||
@ -290,26 +285,17 @@ bool is_zero(ArrayView<T> src) {
|
||||
// Whether or not this is an error is procedure specific, but for most
|
||||
// things, there is a default behavior that is expected.
|
||||
template <typename T>
|
||||
bool is_valid(ArrayView<T> src) {
|
||||
bool is_valid (ArrayView<T> src) {
|
||||
if (src.count < 0) return false;
|
||||
if (src.count == 0) return true;
|
||||
if (src.data == nullptr) return false;
|
||||
// #TODO: For debug builds we can use VirtualQuery to check if
|
||||
// all pages are writable, but that seems excessive for now.
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
// can also use ArrayView<T>(count, data) for initialization!
|
||||
template <typename T>
|
||||
ArrayView<T> array_view(s64 view_count, T* view_data) {
|
||||
ArrayView<T> av;
|
||||
av.count = view_count;
|
||||
av.data = view_data;
|
||||
return av;
|
||||
} // #unsafe, no abc
|
||||
|
||||
template <typename T>
|
||||
ArrayView<T> array_view(Array<T> array) {
|
||||
ArrayView<T> array_view (Array<T> array) {
|
||||
ArrayView<T> av;
|
||||
av.count = array.count;
|
||||
av.data = array.data;
|
||||
@ -317,7 +303,7 @@ ArrayView<T> array_view(Array<T> array) {
|
||||
}
|
||||
|
||||
template <typename T>
|
||||
ArrayView<T> array_view(ArrayView<T> array, s64 start_index, s64 view_count) {
|
||||
ArrayView<T> array_view (ArrayView<T> array, s64 start_index, s64 view_count) {
|
||||
ArrayView<T> av;
|
||||
av.count = view_count; // check if count exceeds
|
||||
Assert(start_index + view_count <= array.count);
|
||||
@ -326,7 +312,7 @@ ArrayView<T> array_view(ArrayView<T> array, s64 start_index, s64 view_count) {
|
||||
}
|
||||
|
||||
template <typename T>
|
||||
ArrayView<T> array_view(Array<T> array, s64 start_index, s64 view_count) {
|
||||
ArrayView<T> array_view (Array<T> array, s64 start_index, s64 view_count) {
|
||||
ArrayView<T> av;
|
||||
av.count = view_count; // check if count exceeds
|
||||
Assert(start_index + view_count <= array.count);
|
||||
@ -335,13 +321,13 @@ ArrayView<T> array_view(Array<T> array, s64 start_index, s64 view_count) {
|
||||
}
|
||||
|
||||
template <typename T>
|
||||
void array_reset_keeping_memory(ArrayView<T>& src) {
|
||||
void array_reset_keeping_memory (ArrayView<T>& src) {
|
||||
src.count = 0;
|
||||
}
|
||||
|
||||
|
||||
template <typename T>
|
||||
ArrayView<T> array_copy(const ArrayView<T>& src) {
|
||||
ArrayView<T> array_copy (const ArrayView<T>& src) {
|
||||
if (!src.data || src.count == 0) {
|
||||
return ArrayView<T>(); // Return an empty array
|
||||
}
|
||||
@ -353,17 +339,18 @@ ArrayView<T> array_copy(const ArrayView<T>& src) {
|
||||
}
|
||||
|
||||
template <typename T>
|
||||
void array_free(ArrayView<T>& src) {
|
||||
void array_free (ArrayView<T>& src) {
|
||||
if (!src.data || src.count == 0) { return; }
|
||||
|
||||
// Use with caution!
|
||||
internal_free(src.data); // we just have to trust that the context.allocator is correct for this guy!
|
||||
|
||||
src.count = 0;
|
||||
src.data = nullptr;
|
||||
}
|
||||
|
||||
// Usage: `auto array = array_from_values<s32>(6,7,8,9,10,51);`
|
||||
template <typename T, typename... ArgValues>
|
||||
Array<T> NewArrayFromValues(ArgValues... args) {
|
||||
Array<T> array_from_values (ArgValues... args) {
|
||||
constexpr s64 N = sizeof...(ArgValues);
|
||||
auto array = Array<T>(N, /*initialize:*/false);
|
||||
T values[] = {args...};
|
||||
@ -374,8 +361,10 @@ Array<T> NewArrayFromValues(ArgValues... args) {
|
||||
return array;
|
||||
}
|
||||
|
||||
|
||||
// Usage `auto view = array_view_from_values<s32>(1,2,3,4,5);`
|
||||
template <typename T, typename... ArgValues>
|
||||
ArrayView<T> NewArrayViewFromValues(ArgValues... args) {
|
||||
ArrayView<T> array_view_from_values (ArgValues... args) {
|
||||
constexpr s64 N = sizeof...(ArgValues);
|
||||
auto array = ArrayView<T>(N, /*initialize:*/false);
|
||||
T values[] = {args...};
|
||||
|
||||
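`ArrayView<T>` is a non-owning (count, data) pair, and the `array_view` overloads above build sub-views without copying; `array_view_from_values` is the one helper here that allocates. A hedged usage sketch based on those declarations:

// Usage sketch only; relies on the view helpers declared above.
f64 raw[4] = { 1.0, 2.0, 3.0, 4.0 };
ArrayView<f64> whole = array_view<f64>(4, raw);       // wraps the C array, no copy
ArrayView<f64> tail  = array_view(whole, 2, 2);       // elements 2..3, still no copy
auto literal = array_view_from_values<s32>(1, 2, 3);  // allocates; pair with array_free
array_free(literal);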
@ -13,6 +13,10 @@
|
||||
#error "CPU not supported (yet)!"
|
||||
#endif
|
||||
|
||||
#include <stdio.h> // vsnprintf
|
||||
#include <cstdarg> // va_list, ...
|
||||
|
||||
|
||||
#if OS_WINDOWS
|
||||
#define WIN32_LEAN_AND_MEAN
|
||||
#include <Windows.h>
|
||||
@ -100,8 +104,6 @@ force_inline T Align (T value, s64 alignment) {
|
||||
return (T)intermediate;
|
||||
}
|
||||
|
||||
// #TODO: template this so it works with any pointer type
|
||||
// force_inline u8* Align_To_Cache_Line(u8* address)
|
||||
/*
|
||||
force_inline s64 Align_Forwards(s64 size, s64 alignment) {
|
||||
return (((size + alignment - 1) / alignment) * alignment);
|
||||
@ -167,9 +169,7 @@ force_inline s64 Next_Power_Of_Two(s64 v) {
|
||||
#endif
|
||||
|
||||
// ForExpansions. Not sure if this is a good idea...
|
||||
// #TODO: Maybe remove these. I prefer verbose and clear over this.
|
||||
#define For(_idx_, _until_) for (s64 _idx_ = 0; _idx_ < _until_; ++_idx_)
|
||||
#define ForBetween(_idx_, _start_, _until_) for (s64 _idx_ = _start_; _idx_ < _until_; ++_idx_)
|
||||
// ↓TODO(Low priority): Maybe remove these. I prefer verbose and clear over this.
|
||||
#define ForArray(_idx_, _array_) for (s64 _idx_ = 0; _idx_ < (_array_).count; ++_idx_)
|
||||
#define ForArrayStartingAt(_it_, _array_, _start_) for (s64 _it_ = _start_; _it_ < (_array_).count; _it_ += 1)
|
||||
#define ForUpTo(_it_, _end_) for (s64 _it_ = 0; _it_ < _end_; _it_ += 1)
|
||||
@ -178,12 +178,16 @@ force_inline s64 Next_Power_Of_Two(s64 v) {
|
||||
// usage `Auto_Reset guard(arena);` within a scope.
|
||||
#define auto_reset(x) \
|
||||
Auto_Reset Concat(_auto_reset_guard_, __LINE__)(x)
|
||||
#define push_allocator(x) Push_Allocator Concat(_push_alloc_guard_, __LINE__)(x)
|
||||
#define push_allocator(x) \
|
||||
Push_Allocator Concat(_push_alloc_guard_, __LINE__)(x)
|
||||
#define push_alignment(x, y) \
|
||||
Push_Alignment Concat(_push_align_guard_, __LINE__)(x, y)
|
||||
#define push_arena(x) \
|
||||
Push_Arena Concat(_push_alloc_guard_, __LINE__)(x)
|
||||
#define auto_release_temp() auto_release(get_temp_allocator());
|
||||
Push_Arena Concat(_push_arena_guard_, __LINE__)(x)
|
||||
#define push_expandable_arena(x) \
|
||||
Push_Expandable_Arena Concat(_push_ex_arena_guard_, __LINE__)(x)
|
||||
#define auto_release_temp() \
|
||||
auto_release(get_temp_allocator());
|
||||
#define auto_release(x) \
|
||||
Auto_Release Concat(_auto_release_guard_, __LINE__)(x)
|
||||
|
||||
|
||||
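Each of these macros declares an RAII guard whose name is made unique with `Concat(..., __LINE__)`, so the pushed allocator, arena, or alignment is restored exactly at the end of the enclosing scope. A hedged usage sketch (the expansion comment uses a made-up line number):

// Usage sketch only; relies on the guard types and macros defined above.
void fill_scratch(Arena* scratch) {
    push_allocator(get_temp_allocator());
    // ^ expands to roughly: Push_Allocator _push_alloc_guard_42(get_temp_allocator());
    push_alignment(scratch, 64);              // force 64-byte alignment temporarily
    void* block = arena_alloc(scratch, 256);
    // ... allocations here use the temp allocator and the pushed alignment ...
}                                             // both guards restore the old state here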
@ -1,29 +1,41 @@
|
||||
// See Context_Base in jai, and TCTX in raddebugger:
|
||||
struct Thread_Context {
|
||||
Arena* temp; // Used for temporary allocations, scratch space.
|
||||
Arena* arena; // general purpose local arena
|
||||
|
||||
Allocator allocator;
|
||||
s32 thread_idx;
|
||||
u16 _padding0;
|
||||
u16 GPAllocator_alignment = 16;
|
||||
// Logger logger;
|
||||
// Stack_Trace* stack_trace;
|
||||
// #TODO: other debug information
|
||||
// #TODO:
|
||||
// Array<Thread*> threads_created; // maybe should be linked-list?
|
||||
// Thread* thread_that_created_me = nullptr; // so we can remove from above array
|
||||
// Mutex thread_context_mutex;
|
||||
internal void Bootstrap_Main_Thread_Context () {
|
||||
// 0. Setup general allocator
|
||||
GPAllocator_Initialize_Allocation_Tracker();
|
||||
|
||||
string thread_name;
|
||||
};
|
||||
// 1. Setup arena table
|
||||
arena_table = (Arena_Table*)GPAllocator_New(sizeof(Arena_Table), 64, true); // permanent allocation.
|
||||
memset(arena_table, 0, sizeof(Arena_Table));
|
||||
initialize_arena_table(GPAllocator());
|
||||
|
||||
// 2. Setup thread local context
|
||||
ExpandableArena* arena_ex = expandable_arena_new(Arena_Reserve::Size_64M, 16);
|
||||
|
||||
thread_local_context = New<Thread_Context>(get_allocator(arena_ex));
|
||||
thread_local_context->temp = expandable_arena_new(Arena_Reserve::Size_2M, 16);
|
||||
thread_local_context->arena = arena_ex;
|
||||
thread_local_context->allocator = get_allocator(arena_ex);
|
||||
thread_local_context->thread_idx = 0;
|
||||
thread_local_context->thread_name = "Main Thread";
|
||||
thread_local_context->log_builder = new_string_builder(Arena_Reserve::Size_64M);
|
||||
|
||||
Thread_Context* get_thread_context();
|
||||
default_logger_initialize();
|
||||
thread_local_context->logger = {default_logger_proc, &default_logger};
|
||||
}
|
||||
|
||||
struct Push_Arena {
|
||||
Thread_Context* context;
|
||||
Allocator original_allocator;
|
||||
|
||||
Push_Arena(ExpandableArena* arena_ex) {
|
||||
Assert(is_valid(arena_ex));
|
||||
context = get_thread_context();
|
||||
Assert(context != nullptr);
|
||||
original_allocator = context->allocator;
|
||||
context->allocator = get_allocator(arena_ex);
|
||||
}
|
||||
|
||||
Push_Arena(Arena* arena) {
|
||||
Assert(is_valid(arena));
|
||||
context = get_thread_context();
|
||||
@ -37,30 +49,7 @@ struct Push_Arena {
|
||||
}
|
||||
};
|
||||
|
||||
struct Push_Allocator {
|
||||
Thread_Context* context;
|
||||
Allocator old_allocator;
|
||||
|
||||
Push_Allocator (Allocator new_allocator) {
|
||||
context = get_thread_context();
|
||||
old_allocator = context->allocator;
|
||||
context->allocator = new_allocator;
|
||||
}
|
||||
|
||||
~Push_Allocator () {
|
||||
context->allocator = old_allocator;
|
||||
}
|
||||
};
|
||||
|
||||
|
||||
// Thread-local allocators:
|
||||
PROTOTYPING_API Allocator get_temp_allocator();
|
||||
PROTOTYPING_API Allocator get_context_allocator();
|
||||
|
||||
|
||||
|
||||
// C_LINKAGE thread_static TCTX* tctx_thread_local;
|
||||
thread_static Thread_Context* thread_local_context;
|
||||
|
||||
// Start from w32_entry_point_caller ->
|
||||
// see main_thread_base_entry_point
|
||||
@ -76,11 +65,15 @@ force_inline void set_thread_context (Thread_Context* new_context) {
|
||||
thread_local_context = new_context;
|
||||
}
|
||||
|
||||
Thread_Context* get_thread_context() {
|
||||
Thread_Context* get_thread_context () {
|
||||
return (Thread_Context*)thread_local_context;
|
||||
}
|
||||
|
||||
force_inline Allocator get_temp_allocator() {
|
||||
Logger* get_context_logger () {
|
||||
return &get_thread_context()->logger;
|
||||
}
|
||||
|
||||
force_inline Allocator get_temp_allocator () {
|
||||
return get_allocator(get_thread_context()->temp);
|
||||
}
|
||||
|
||||
@ -91,10 +84,10 @@ force_inline Allocator get_context_allocator() {
|
||||
|
||||
void temp_reset_keeping_memory() {
|
||||
Thread_Context* context = get_thread_context();
|
||||
arena_reset_keeping_memory(context->temp);
|
||||
arena_reset(context->temp, false);
|
||||
}
|
||||
|
||||
void temp_reset() {
|
||||
Thread_Context* context = get_thread_context();
|
||||
arena_reset(context->temp);
|
||||
arena_reset(context->temp, true);
|
||||
}
|
||||
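`Bootstrap_Main_Thread_Context` wires everything up in a fixed order: the general-purpose allocator's tracker first, then the shared arena table, then the thread-local context (its own arena, temp arena, allocator, and logger). A hedged usage sketch of the temp allocator that this makes available:

// Usage sketch only; relies on the thread-context API shown in this file.
Bootstrap_Main_Thread_Context();              // once, on the main thread

Allocator temp = get_temp_allocator();
char* line = (char*)temp.proc(Allocator_Mode::ALLOCATE, 256, 0, nullptr, temp.data);
// ... build a throwaway string in `line` ...
temp_reset();                                 // rewind the whole temp arena at once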
lib/Base/Base_Thread_Context.h (new file): 41 lines
@ -0,0 +1,41 @@
|
||||
struct Thread; // hacky fwd declare
|
||||
|
||||
struct Thread_Context {
|
||||
ExpandableArena* temp; // Used for temporary allocations, scratch space.
|
||||
ExpandableArena* arena; // general purpose local arena
|
||||
|
||||
Allocator allocator;
|
||||
s32 thread_idx;
|
||||
u16 _padding0;
|
||||
u16 GPAllocator_alignment = 16;
|
||||
Logger logger = {nullptr, nullptr};
|
||||
String_Builder* log_builder;
|
||||
// Stack_Trace* stack_trace;
|
||||
|
||||
Array<Thread*> child_threads; // maybe should be linked-list?
|
||||
Thread* thread_that_created_me = nullptr; // so we can remove from above array
|
||||
|
||||
string thread_name;
|
||||
};
|
||||
|
||||
// C_LINKAGE thread_static TCTX* tctx_thread_local;
|
||||
thread_static Thread_Context* thread_local_context;
|
||||
|
||||
Thread_Context* get_thread_context ();
|
||||
|
||||
internal void Bootstrap_Main_Thread_Context ();
|
||||
|
||||
struct Push_Allocator {
|
||||
Thread_Context* context;
|
||||
Allocator old_allocator;
|
||||
|
||||
Push_Allocator (Allocator new_allocator) {
|
||||
context = get_thread_context();
|
||||
old_allocator = context->allocator;
|
||||
context->allocator = new_allocator;
|
||||
}
|
||||
|
||||
~Push_Allocator () {
|
||||
context->allocator = old_allocator;
|
||||
}
|
||||
};
|
||||
@ -1,483 +0,0 @@
|
||||
#include "Basic.h"
|
||||
|
||||
#include <cmath>
|
||||
#include <math.h> // isnan, floor
|
||||
#include <stdlib.h> // qsort
|
||||
#include <cassert> // assert
|
||||
|
||||
|
||||
Native_Error* Basic_Difference2 (ArrayView<f64> input, ArrayView<f64>& output) {
|
||||
Array_Check(input);
|
||||
Array_Check(output);
|
||||
|
||||
// ensure enough room. Note output.count = input.count
|
||||
Assert(output.count >= input.count - 1);
|
||||
|
||||
ForUpTo(i, input.count-1) {
|
||||
output[i] = input[i + 1] - input[i];
|
||||
}
|
||||
|
||||
output.count = input.count - 1;
|
||||
|
||||
return nullptr;
|
||||
}
|
||||
|
||||
Native_Error* Basic_Mean2 (ArrayView<f64> input, f64* mean) {
|
||||
Array_Check(input);
|
||||
Null_Pointer_Check(mean);
|
||||
|
||||
f64 sum = 0;
|
||||
ForArray(i, input) {
|
||||
sum += input[i];
|
||||
}
|
||||
|
||||
(*mean) = (sum / (f64)input.count);
|
||||
|
||||
return nullptr;
|
||||
}
|
||||
|
||||
Native_Error* Basic_QuickSortInPlace (ArrayView<f64> input) {
|
||||
Array_Check(input);
|
||||
|
||||
qsort(input.data, input.count, sizeof(double), qsort_doubles_comparator_nonnan);
|
||||
|
||||
return nullptr;
|
||||
}
|
||||
|
||||
Native_Error* Basic_Median2 (ArrayView<f64> unsorted_input, f64* median) {
|
||||
Array_Check(unsorted_input);
|
||||
Null_Pointer_Check(median);
|
||||
auto input_sorted = array_copy(unsorted_input);
|
||||
qsort(input_sorted.data, (u64)input_sorted.count, sizeof(f64), qsort_doubles_comparator_nonnan);
|
||||
|
||||
s64 middle_element_index = unsorted_input.count / 2;
|
||||
|
||||
if (unsorted_input.count % 2 == 1) {
|
||||
(*median) = input_sorted[middle_element_index];
|
||||
} else {
|
||||
(*median) = (input_sorted[middle_element_index - 1] + input_sorted[middle_element_index]) / 2.0;
|
||||
}
|
||||
|
||||
array_free(input_sorted);
|
||||
|
||||
return nullptr;
|
||||
}
|
||||
|
||||
Native_Error* Basic_RescaleInPlace (ArrayView<f64> input, double min, double max) {
|
||||
Array_Check(input);
|
||||
if (max < min || max == min) { return New_Error("Min or max inputs are not valid!"); }
|
||||
|
||||
f64 smallest_element; f64 largest_element;
|
||||
auto error = Basic_Min2(input, &smallest_element);
|
||||
if (error != nullptr) return error;
|
||||
|
||||
error = Basic_Max2(input, &largest_element);
|
||||
if (error != nullptr) return error;
|
||||
|
||||
if (largest_element == smallest_element)
|
||||
return nullptr;
|
||||
|
||||
ForArray(i, input) {
|
||||
input[i] = (input[i] - smallest_element) / (largest_element - smallest_element) * (max - min) + min;
|
||||
}
|
||||
|
||||
return nullptr;
|
||||
}
|
||||
|
||||
Native_Error* Basic_Min2 (ArrayView<f64> input, f64* min_out) {
|
||||
Array_Check(input);
|
||||
Null_Pointer_Check(min_out);
|
||||
|
||||
f64 min = input[0];
|
||||
ForArrayStartingAt(i, input, 1) {
|
||||
if (input[i] < min) {
|
||||
min = input[i];
|
||||
}
|
||||
}
|
||||
|
||||
(*min_out) = min;
|
||||
return nullptr;
|
||||
}
|
||||
|
||||
Native_Error* Basic_Max2 (ArrayView<f64> input, f64* max_out) {
|
||||
Array_Check(input);
|
||||
Null_Pointer_Check(max_out);
|
||||
|
||||
f64 max = input[0];
|
||||
ForArrayStartingAt(i, input, 1) {
|
||||
if (input[i] > max) {
|
||||
max = input[i];
|
||||
}
|
||||
}
|
||||
|
||||
(*max_out) = max;
|
||||
return nullptr;
|
||||
}
|
||||
|
||||
double Basic_Max (double input1, double input2) {
|
||||
if (input1 > input2) return input1;
|
||||
else return input2;
|
||||
}
|
||||
|
||||
bool Basic_Is_Positive_Real (f32 input) {
|
||||
return (!(input <= 0.0 || isnan(input) || isinf(input)));
|
||||
}
|
||||
|
||||
bool Basic_Is_Positive_Real (f64 input) {
|
||||
return (!(input <= 0.0 || isnan(input) || isinf(input)));
|
||||
}
|
||||
|
||||
Native_Error* Basic_Standard_Deviation2 (ArrayView<f64> input, f64* stddev) {
|
||||
Array_Check(input);
|
||||
Null_Pointer_Check(stddev);
|
||||
|
||||
f64 mean = 0.0;
|
||||
Basic_Mean2(input, &mean);
|
||||
|
||||
f64 sum_of_squared_differences = 0;
|
||||
ForArray(i, input) {
|
||||
sum_of_squared_differences += (input[i] - mean) * (input[i] - mean);
|
||||
}
|
||||
|
||||
(*stddev) = sqrt(sum_of_squared_differences / (f64)input.count);
|
||||
|
||||
return nullptr;
|
||||
}
|
||||
|
||||
Native_Error* Basic_Variance2 (ArrayView<f64> input, f64* variance) {
|
||||
Array_Check(input);
|
||||
Null_Pointer_Check(variance);
|
||||
|
||||
f64 mean = 0.0;
|
||||
Basic_Mean2(input, &mean);
|
||||
|
||||
f64 sum_of_squared_differences = 0;
|
||||
ForArray(i, input) {
|
||||
sum_of_squared_differences += (input[i] - mean) * (input[i] - mean);
|
||||
}
|
||||
|
||||
f64 sample = 1;
|
||||
|
||||
(*variance) = (sum_of_squared_differences / (f64)(sample ? (input.count - 1) : input.count));
|
||||
return nullptr;
|
||||
}
|
||||
|
||||
Native_Error* Basic_Root_Mean_Squared2 (ArrayView<f64> input, f64* rms) {
|
||||
Array_Check(input);
|
||||
Null_Pointer_Check(rms);
|
||||
|
||||
f64 square = 0;
|
||||
ForArray(i, input) {
|
||||
square += pow(input[i], 2);
|
||||
}
|
||||
f64 mean = (square / ((f64)input.count));
|
||||
|
||||
(*rms) = sqrt(mean);
|
||||
return nullptr;
|
||||
}
|
||||
|
||||
Native_Error* Basic_IndexSort2 (ArrayView<f64> input, ArrayView<s64> output) {
|
||||
Array_Check(input);
|
||||
Array_Check(output);
|
||||
|
||||
ForArray(i, input) { output[i] = i; }
|
||||
ForArray(i, input) {
|
||||
for (s64 j = i; j > 0; j -= 1) {
|
||||
if (input[output[j]] > input[output[j-1]]) {
|
||||
s64 temp = output[j];
|
||||
output[j] = output[j - 1];
|
||||
output[j - 1] = temp;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return nullptr;
|
||||
}
|
||||
|
||||
Native_Error* Basic_Count_Non_Nan2 (ArrayView<f64> input, s64* non_nan_count) {
|
||||
Array_Check(input);
|
||||
Null_Pointer_Check(non_nan_count);
|
||||
|
||||
s64 count = 0;
|
||||
|
||||
ForArray(i, input) {
|
||||
if (!isnan(input[i])) {
|
||||
count += 1;
|
||||
}
|
||||
}
|
||||
|
||||
(*non_nan_count) = count;
|
||||
return nullptr;
|
||||
}
|
||||
|
||||
Native_Error* Basic_Calculate_Percentile_New (ArrayView<f64> input, f64 percentile, f64* percentile_value_out) {
|
||||
Array_Check(input);
|
||||
Null_Pointer_Check(percentile_value_out);
|
||||
|
||||
Assert(percentile >= 0.0 && percentile <= 1.0);
|
||||
|
||||
qsort(input.data, input.count, sizeof(f64), qsort_doubles_comparator);
|
||||
s64 non_nan_count = 0;
|
||||
Assert(Basic_Count_Non_Nan2(input, &non_nan_count) == nullptr);
|
||||
|
||||
if (non_nan_count == 0) {
|
||||
(*percentile_value_out) = NAN;
|
||||
return New_Warning("All values in the input array are `NAN`!");
|
||||
}
|
||||
|
||||
auto r = percentile * non_nan_count;
|
||||
auto k = floor(r + 0.5);
|
||||
|
||||
auto kp1 = k + 1;
|
||||
|
||||
// Ratio between the K and K+1 rows:
|
||||
r = r - k;
|
||||
|
||||
// Find indices that are out of the range 1 to n and cap them:
|
||||
if (k < 1 || isnan(k)) {
|
||||
k = 1;
|
||||
}
|
||||
|
||||
// kp1 = min( kp1, n );
|
||||
if (non_nan_count < kp1) { kp1 = (f64)non_nan_count; }
|
||||
|
||||
// Use simple linear interpolation for the valid percentages:
|
||||
// y = (0.5+r).*x(kp1,:)+(0.5-r).*x(k,:); // yuck.
|
||||
s64 kp1_i = static_cast<s64>(kp1);
|
||||
s64 k_i = static_cast<s64>(k);
|
||||
|
||||
f64 y_first_part = (0.5 + r) * input[kp1_i - 1];
|
||||
f64 y_second_part = (0.5 - r) * input[k_i - 1];
|
||||
auto y = y_first_part + y_second_part;
|
||||
|
||||
// Make sure that values we hit exactly are copied rather than interpolated:
|
||||
if (r == -0.5) {
|
||||
(*percentile_value_out) = input[k_i - 1];
|
||||
return nullptr;
|
||||
}
|
||||
|
||||
// Make sure that identical values are copied rather than interpolated:
|
||||
if (input[k_i-1] == input[kp1_i-1]) {
|
||||
(*percentile_value_out) = input[k_i - 1];
|
||||
return nullptr;
|
||||
}
|
||||
|
||||
(*percentile_value_out) = y;
|
||||
|
||||
return nullptr;
|
||||
}
|
||||
|
||||
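A worked example of the interpolation above, simply following the code as written (no claim is made here about matching any external percentile definition):

// Worked example for Basic_Calculate_Percentile_New, following the code above.
// input (sorted, no NaNs) = {10, 20, 30, 40}, percentile = 0.25:
//   r   = 0.25 * 4          = 1.0
//   k   = floor(1.0 + 0.5)  = 1,   kp1 = 2,   r = r - k = 0.0
//   y   = (0.5 + 0.0) * input[kp1-1] + (0.5 - 0.0) * input[k-1]
//       = 0.5 * 20 + 0.5 * 10 = 15
// Neither early-return case fires (r != -0.5, input[0] != input[1]),
// so the reported 25th percentile is 15.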
Native_Error* Basic_ReverseArrayInPlace (ArrayView<f64> input) {
|
||||
Array_Check(input);
|
||||
ForUpTo(i, input.count/2) {
|
||||
f64 temp = input[i];
|
||||
input[i] = input[input.count - i - 1];
|
||||
input[input.count - i - 1] = temp;
|
||||
}
|
||||
|
||||
return nullptr;
|
||||
}
|
||||
|
||||
// Native_Error* Basic_Reverse_Array (int* input, int input_length) {
|
||||
// for (int i = 0; i < input_length / 2; i++) {
|
||||
// // Swap the ith and (input_length - i - 1)th elements
|
||||
// int temp = input[i];
|
||||
// input[i] = input[input_length - i - 1];
|
||||
// input[input_length - i - 1] = temp;
|
||||
// }
|
||||
|
||||
// return nullptr;
|
||||
// }
|
||||
|
||||
// Native_Error* Basic_Reverse_Array (double* input, int input_length) {
|
||||
// for (int i = 0; i < input_length / 2; i++) {
|
||||
// // Swap the ith and (input_length - i - 1)th elements
|
||||
// double temp = input[i];
|
||||
// input[i] = input[input_length - i - 1];
|
||||
// input[input_length - i - 1] = temp;
|
||||
// }
|
||||
|
||||
// return nullptr;
|
||||
// }
|
||||
|
||||
// #TODO: This should be for NDArray or 2DArray. idk.
|
||||
Native_Error* Basic_2DArrayInvertMemoryOrder (ArrayView<f64> input, s64 first_dimension, s64 second_dimension, ArrayView<f64> output) {
|
||||
Array_Check(input);
|
||||
Array_Check(output);
|
||||
if (output.count < input.count) { return New_Error("`input.count` should not exceed `output.count`!"); }
|
||||
Assert(first_dimension * second_dimension == input.count);
|
||||
Assert(input.count == output.count);
|
||||
|
||||
ForUpTo(i, first_dimension) {
|
||||
ForUpTo(j, second_dimension) {
|
||||
output[j + second_dimension * i] = input[i + first_dimension * j];
|
||||
}
|
||||
}
|
||||
|
||||
return nullptr;
|
||||
}
|
||||
|
||||
bool sort_doubles_comparator(double a, double b) {
|
||||
if (isnan(a)) return false; // NaN values are considered greater
|
||||
if (isnan(b)) return true; // Non-NaN values are considered smaller
|
||||
return a < b; // Normal comparison for non-NaN values
|
||||
}
|
||||
|
||||
int qsort_doubles_comparator_nonnan(const void* a, const void* b) {
|
||||
double val1 = (*(const double*)a);
|
||||
double val2 = (*(const double*)b);
|
||||
|
||||
if (val1 < val2) return -1;
|
||||
if (val1 > val2) return 1;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
int qsort_doubles_comparator(const void* a, const void* b) {
|
||||
double val1 = (*(const double*)a);
|
||||
double val2 = (*(const double*)b);
|
||||
if (isnan(val1)) return 1; // NaN values are considered greater
|
||||
if (isnan(val2)) return -1; // Non-NaN values are considered smaller
|
||||
|
||||
if (val1 < val2) return -1;
|
||||
if (val1 > val2) return 1;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
Native_Error* Basic_CalculatePercentileNoSort (ArrayView<f64> input, f64 percentile, f64* percentile_value_out) {
|
||||
Array_Check(input);
|
||||
Null_Pointer_Check(percentile_value_out);
|
||||
|
||||
Assert(percentile >= 0.0 && percentile <= 1.0);
|
||||
|
||||
s64 non_nan_count = input.count;
|
||||
|
||||
auto r = percentile * non_nan_count;
|
||||
auto k = floor(r + 0.5);
|
||||
|
||||
auto kp1 = k + 1;
|
||||
|
||||
// Ratio between the K and K+1 rows:
|
||||
r = r - k;
|
||||
|
||||
// Find indices that are out of the range 1 to n and cap them:
|
||||
if (k < 1 || isnan(k)) {
|
||||
k = 1;
|
||||
}
|
||||
|
||||
// kp1 = min( kp1, n );
|
||||
if (non_nan_count < kp1) { kp1 = (f64)non_nan_count; }
|
||||
|
||||
// Use simple linear interpolation for the valid percentages:
|
||||
// y = (0.5+r).*x(kp1,:)+(0.5-r).*x(k,:); // yuck.
|
||||
s64 kp1_i = static_cast<s64>(kp1);
|
||||
s64 k_i = static_cast<s64>(k);
|
||||
|
||||
f64 y_first_part = (0.5 + r) * input[kp1_i - 1];
|
||||
f64 y_second_part = (0.5 - r) * input[k_i - 1];
|
||||
auto y = y_first_part + y_second_part;
|
||||
|
||||
// Make sure that values we hit exactly are copied rather than interpolated:
|
||||
if (r == -0.5) {
|
||||
(*percentile_value_out) = input[k_i - 1];
|
||||
return nullptr;
|
||||
}
|
||||
|
||||
// Make sure that identical values are copied rather than interpolated:
|
||||
if (input[k_i-1] == input[kp1_i-1]) {
|
||||
(*percentile_value_out) = input[k_i - 1];
|
||||
return nullptr;
|
||||
}
|
||||
|
||||
(*percentile_value_out) = y;
|
||||
|
||||
return nullptr;
|
||||
}
|
||||
|
||||
Native_Error* Basic_Replace_Outliers2 (ArrayView<f64> input, f64 outlier_threshold) {
|
||||
Array_Check(input);
|
||||
Assert(outlier_threshold > 0);
|
||||
|
||||
auto input_copy = array_copy(input);
|
||||
|
||||
qsort(input_copy.data, input_copy.count, sizeof(f64), qsort_doubles_comparator_nonnan);
|
||||
f64 Q1 = 0.0;
|
||||
f64 Q3 = 0.0;
|
||||
|
||||
Assert(Basic_CalculatePercentileNoSort(input_copy, 0.25, &Q1) == nullptr);
|
||||
Assert(Basic_CalculatePercentileNoSort(input_copy, 0.75, &Q3) == nullptr);
|
||||
|
||||
f64 IQR = Q3 - Q1;
|
||||
f64 iqr_outlier_threshold = IQR * outlier_threshold;
|
||||
|
||||
// Identify points below Q1 - outlier_threshold, and above Q3 + outlier_threshold
|
||||
auto low_threshold = Q1 - iqr_outlier_threshold;
|
||||
auto high_threshold = Q3 + iqr_outlier_threshold;
|
||||
|
||||
ForArrayStartingAt(i, input, 1) {
|
||||
if (input[i] < low_threshold || input[i] > high_threshold) {
|
||||
input[i] = input[i-1];
|
||||
}
|
||||
}
|
||||
|
||||
array_free(input_copy);
|
||||
|
||||
return nullptr;
|
||||
}
|
||||
|
||||
Native_Error* Basic_Replace_Values_Beyond_Threshold2 (ArrayView<f64> input, f64 low_threshold, f64 high_threshold, f64 replacement_value) {
|
||||
Array_Check(input);
|
||||
|
||||
ForArray(i, input) {
|
||||
if (input[i] < low_threshold || input[i] > high_threshold) {
|
||||
input[i] = replacement_value;
|
||||
}
|
||||
}
|
||||
|
||||
return nullptr;
|
||||
}
|
||||
|
||||
|
||||
/* // #TODO: Replace with version that doesn't use Eigen
|
||||
Native_Error* Basic_Roots_To_Polynomials2 (ArrayView<f64> roots, ArrayView<f64> polynomials) {
|
||||
Array_Check(roots);
|
||||
|
||||
s64 root_count = roots.count;
|
||||
|
||||
if (root_count == 0) { return New_Error("`roots.count` is zero!"); }
|
||||
if (polynomials.count < root_count + 1) {
|
||||
return New_Error("`polynomials.count` should be roots.count + 1!");
|
||||
}
|
||||
|
||||
// For real roots
|
||||
Eigen::VectorXd roots_vec = Eigen::Map<Eigen::VectorXd>(roots.data, root_count);
|
||||
// c = [1 zeros(1,n,class(x))];
|
||||
Eigen::VectorXd c = Eigen::VectorXd::Zero(root_count + 1);
|
||||
c[0] = 1.0;
|
||||
// for j = 1:n
|
||||
// c[1] = c[1] - roots_vec[0] * c[0]; // Extract first index
|
||||
ForArray(i, roots) {
|
||||
// c(2:(j+1)) = c(2:(j+1)) - e(j).*c(1:j);
|
||||
Eigen::VectorXd val_temp = c.segment(1, i + 1) - roots_vec[i] * c.segment(0, i + 1);
|
||||
c.segment(1, i + 1) = val_temp;
|
||||
}
|
||||
// The result should be real if the roots are complex conjugates.
|
||||
memcpy(polynomials.data, c.data(), (root_count + 1) * sizeof(f64));
|
||||
|
||||
return nullptr;
|
||||
}
|
||||
*/
|
||||
|
||||
Complex exponential (Complex cx) {
|
||||
f64 e = std::exp(cx.real);
|
||||
return Complex(e * std::cos(cx.imag), e * std::sin(cx.imag));
|
||||
}
|
||||
|
||||
Complex conjugate (Complex cx) {
|
||||
return Complex(cx.real, -cx.imag);
|
||||
}
|
||||
|
||||
f64 fabs(Complex cx) {
|
||||
return sqrt(cx.real * cx.real + cx.imag * cx.imag);
|
||||
}
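// Quick sanity identities for the helpers above (the inputs are small integers, so the f64
// comparisons are exact):
//
//     Complex z(3.0, 4.0);
//     Assert(fabs(z) == 5.0);                    // |3 + 4i| = 5
//     Assert((z * conjugate(z)).real == 25.0);   // z * conj(z) = |z|^2, imaginary part is 0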
|
||||
lib/Base/Basic.h
@ -1,145 +0,0 @@
|
||||
#pragma once
|
||||
|
||||
#ifdef __cplusplus
|
||||
extern "C" {
|
||||
#endif
|
||||
#ifdef __cplusplus
|
||||
}
|
||||
#endif
|
||||
|
||||
int qsort_doubles_comparator_nonnan(const void* a, const void* b);
|
||||
int qsort_doubles_comparator(const void* a, const void* b);
|
||||
|
||||
// @brief Calculates difference and approximate derivative for 1-dimensional data
|
||||
// Caller needs to supply memory for output and understand that output_length = input_length - 1
|
||||
PROTOTYPING_API Native_Error* Basic_Difference2 (ArrayView<f64> input, ArrayView<f64>& output);
|
||||
|
||||
PROTOTYPING_API Native_Error* Basic_Mean2 (ArrayView<f64> input, f64* mean);
|
||||
|
||||
PROTOTYPING_API Native_Error* Basic_QuickSortInPlace (ArrayView<f64> input);
|
||||
|
||||
PROTOTYPING_API Native_Error* Basic_Median2 (ArrayView<f64> unsorted_input, f64* median);
|
||||
|
||||
PROTOTYPING_API Native_Error* Basic_RescaleInPlace (ArrayView<f64> input, double min, double max);
|
||||
|
||||
PROTOTYPING_API Native_Error* Basic_Min2 (ArrayView<f64> input, f64* min_out);
|
||||
|
||||
PROTOTYPING_API Native_Error* Basic_Max2 (ArrayView<f64> input, f64* max_out);
|
||||
|
||||
double Basic_Max (double input1, double input2);
|
||||
|
||||
bool Basic_Is_Positive_Real (f32 input);
|
||||
bool Basic_Is_Positive_Real (f64 input);
|
||||
|
||||
PROTOTYPING_API Native_Error* Basic_Standard_Deviation2 (ArrayView<f64> input, f64* stddev);
|
||||
|
||||
PROTOTYPING_API Native_Error* Basic_Variance2 (ArrayView<f64> input, f64* variance);
|
||||
|
||||
PROTOTYPING_API Native_Error* Basic_Root_Mean_Squared2 (ArrayView<f64> input, f64* rms);
|
||||
|
||||
// Sorts an array from largest to smallest, returning the indices of the sorted array
|
||||
PROTOTYPING_API Native_Error* Basic_IndexSort2 (ArrayView<f64> input, ArrayView<s64> output);
|
||||
|
||||
PROTOTYPING_API Native_Error* Basic_Count_Non_Nan2 (ArrayView<f64> input, s64* non_nan_count);
|
||||
|
||||
PROTOTYPING_API Native_Error* Basic_Calculate_Percentile_New (ArrayView<f64> input, f64 percentile, f64* percentile_value_out);
|
||||
|
||||
// Does not include sort, because sorting is slow, and we may need to call this multiple
|
||||
// times with the same sorted input.
|
||||
PROTOTYPING_API Native_Error* Basic_CalculatePercentileNoSort (ArrayView<f64> input, f64 percentile, f64* percentile_value_out);
|
||||
|
||||
PROTOTYPING_API Native_Error* Basic_ReverseArrayInPlace (ArrayView<f64> input);
|
||||
// Native_Error* Basic_Reverse_Array (double* input, int input_length);
|
||||
// Native_Error* Basic_Reverse_Array (int* input, int input_length);
|
||||
|
||||
// Switches from row-order to column-order or vice-versa. #NOTE: you must know what the order
|
||||
// and dimensions of the data are to begin with!!
|
||||
PROTOTYPING_API Native_Error* Basic_2DArrayInvertMemoryOrder (ArrayView<f64> input, s64 first_dimension, s64 second_dimension, ArrayView<f64> output);
|
||||
|
||||
// In-place replacement of outliers (using interquartile method, with threshold of 1.5) with nearest values.
|
||||
PROTOTYPING_API Native_Error* Basic_Replace_Outliers2 (ArrayView<f64> input, f64 outlier_threshold=1.5);
|
||||
|
||||
PROTOTYPING_API Native_Error* Basic_Replace_Values_Beyond_Threshold2 (ArrayView<f64> input, f64 low_threshold, f64 high_threshold, f64 replacement_value);
|
||||
|
||||
PROTOTYPING_API Native_Error* Basic_Roots_To_Polynomials2 (ArrayView<f64> roots, ArrayView<f64> polynomials);
|
||||
|
||||
// #TODO: Basic_Find (returns indices of non-zero elements).
|
||||
// Need to make this generic, maybe using templates?
|
||||
// PROTOTYPING_API ArrayView<s32> Basic_Find(ArrayView<f64> x, void* condition);
|
||||
|
||||
// Add parameters for peak prominence, height, etc.
|
||||
// PROTOTYPING_API Native_Error* Basic_Find_Peaks (double* input, int input_length, int* peak_indices, int* peak_count);
|
||||
|
||||
struct Complex {
|
||||
f64 real; f64 imag;
|
||||
|
||||
Complex() { real = 0; imag = 0; }
|
||||
Complex(f64 _real) { real = _real; imag = 0; }
|
||||
Complex(f64 _real, f64 _imag) { real = _real; imag = _imag; }
|
||||
|
||||
Complex operator+(const Complex& other) const {
|
||||
return Complex(real + other.real, imag + other.imag);
|
||||
}
|
||||
|
||||
Complex operator-(const Complex& other) const {
|
||||
return Complex(real - other.real, imag - other.imag);
|
||||
}
|
||||
|
||||
Complex operator*(const Complex& other) const {
|
||||
return Complex(
|
||||
real * other.real - imag * other.imag,
|
||||
real * other.imag + imag * other.real
|
||||
);
|
||||
}
|
||||
|
||||
Complex operator/(const Complex& other) const {
|
||||
f64 denom = other.real * other.real + other.imag * other.imag;
|
||||
return Complex(
|
||||
(real * other.real + imag * other.imag) / denom,
|
||||
(imag * other.real - real * other.imag) / denom
|
||||
);
|
||||
}
|
||||
|
||||
Complex& operator+=(const Complex& other) {
|
||||
real += other.real;
|
||||
imag += other.imag;
|
||||
return *this;
|
||||
}
|
||||
|
||||
Complex& operator-=(const Complex& other) {
|
||||
real -= other.real;
|
||||
imag -= other.imag;
|
||||
return *this;
|
||||
}
|
||||
|
||||
Complex& operator*=(const Complex& other) {
|
||||
f64 r = real * other.real - imag * other.imag;
|
||||
f64 i = real * other.imag + imag * other.real;
|
||||
real = r;
|
||||
imag = i;
|
||||
return *this;
|
||||
}
|
||||
|
||||
Complex& operator/=(const Complex& other) {
|
||||
f64 denom = other.real * other.real + other.imag * other.imag;
|
||||
f64 r = (real * other.real + imag * other.imag) / denom;
|
||||
f64 i = (imag * other.real - real * other.imag) / denom;
|
||||
real = r;
|
||||
imag = i;
|
||||
return *this;
|
||||
}
|
||||
|
||||
bool operator==(const Complex& other) const {
|
||||
return real == other.real && imag == other.imag;
|
||||
}
|
||||
|
||||
bool operator!=(const Complex& other) const {
|
||||
return !(*this == other);
|
||||
}
|
||||
};
|
||||
|
||||
struct Complex32 { f32 real; f32 imag; };
|
||||
|
||||
Complex exponential (Complex cx);
|
||||
Complex conjugate (Complex cx);
|
||||
f64 fabs (Complex cx);
|
||||
@ -1,166 +0,0 @@
|
||||
enum ErrorSeverity: s32 {
|
||||
SEVERITY_WARNING = 0,
|
||||
SEVERITY_NON_FATAL = 1,
|
||||
SEVERITY_FATAL = 2
|
||||
};
|
||||
|
||||
// typedef struct string Native_Error;
|
||||
// Note: Native_Error should down-cast to a string.
|
||||
struct Native_Error {
|
||||
s64 count;
|
||||
u8* data;
|
||||
ErrorSeverity severity = SEVERITY_WARNING;
|
||||
};
|
||||
|
||||
#define Null_Pointer_Check(arg) \
|
||||
if (arg == nullptr) { \
|
||||
return New_Fatal_Error_Internal("%s:%d\n[%s] Error: %s is a null pointer.", __FILE__, __LINE__, __FUNCTION__, Stringify(arg)); \
|
||||
}
|
||||
|
||||
#define Array_Check(arg) \
|
||||
if (!is_valid(arg)) { \
|
||||
return New_Fatal_Error_Internal("%s:%d\n[%s] Error: %s is not a valid array.", __FILE__, __LINE__, __FUNCTION__, Stringify(arg)); \
|
||||
}
|
||||
|
||||
#define String_Check(arg) \
|
||||
if (!Is_Valid(arg)) { return New_Fatal_Error_Internal("%s:%d\n[%s] Error: %s is not a valid string.", __FILE__, __LINE__, __FUNCTION__, Stringify(arg)); }
|
||||
|
||||
#define Error_Check(error) \
|
||||
if (error != nullptr) { \
|
||||
return error; \
|
||||
}
|
||||
|
||||
// An error from which the program cannot continue (e.g. a segmentation fault)
|
||||
#define New_Fatal_Error(message) \
|
||||
New_Fatal_Error_Internal("%s:%d\n[%s] Error: %s.", __FILE__, __LINE__, __FUNCTION__, message)
|
||||
|
||||
#define New_Error(message) \
|
||||
New_Error_Internal("%s:%d\n[%s] Error: %s.", __FILE__, __LINE__, __FUNCTION__, message)
|
||||
|
||||
#define New_Warning(message) \
|
||||
New_Warning_Internal("%s:%d\n[%s] Warning: %s.", __FILE__, __LINE__, __FUNCTION__, message)
|
||||
|
||||
Native_Error* New_Fatal_Error_Internal(char* raw_message, ...);
|
||||
|
||||
Native_Error* New_Error_Internal(char* raw_message, ...);
|
||||
|
||||
Native_Error* New_Warning_Internal(char* raw_message, ...);
|
||||
|
||||
Native_Error* Native_Error_Callstack(Native_Error* new_error, Native_Error* old_error, ErrorSeverity severity);
|
||||
|
||||
PROTOTYPING_API C_API Native_Error* Cleanup_Error(Native_Error* error);
|
||||
|
||||
PROTOTYPING_API C_API Native_Error* Native_Error_Test();
|
||||
|
||||
|
||||
#include "General_Purpose_Allocator.h"
|
||||
#include <stdio.h> // vsnprintf, printf
|
||||
#include <cstdarg> // va_list...
|
||||
|
||||
#define BREAK_ON_WARNINGS 0
|
||||
#define BREAK_ON_ERRORS 0
|
||||
#define BREAK_ON_FATAL_ERROR BUILD_DEBUG
|
||||
#define ALWAYS_PRINT_ERROR_MESSAGES BUILD_DEBUG
|
||||
|
||||
Native_Error* Create_New_Native_Error_Internal(char* format, va_list args) {
|
||||
constexpr s64 ERROR_BUFFER_COUNT = 512;
|
||||
|
||||
// push_allocator(GPAllocator());
|
||||
|
||||
auto error = New<Native_Error>(false);
|
||||
error->data = (u8*)GPAllocator_New(ERROR_BUFFER_COUNT);
|
||||
|
||||
// You MUST copy the va_list before using it more than once
|
||||
va_list args_copy;
|
||||
va_copy(args_copy, args);
|
||||
error->count = (s64)vsnprintf((char*)error->data, (size_t)ERROR_BUFFER_COUNT, format, args_copy);
|
||||
va_end(args_copy);
|
||||
|
||||
return error;
|
||||
}
|
||||
|
||||
Native_Error* New_Fatal_Error_Internal(char* format, ...) {
|
||||
va_list args;
|
||||
va_start(args, format);
|
||||
auto error = Create_New_Native_Error_Internal(format, args);
|
||||
va_end(args);
|
||||
|
||||
error->severity = SEVERITY_FATAL;
|
||||
#if BUILD_DEBUG && ALWAYS_PRINT_ERROR_MESSAGES
|
||||
printf("[FATAL ERROR] %.*s\n", (s32)error->count, (char*)error->data);
|
||||
#endif
|
||||
#if BREAK_ON_FATAL_ERROR
|
||||
debug_break();
|
||||
#endif
|
||||
|
||||
return error;
|
||||
}
|
||||
|
||||
Native_Error* Native_Error_Callstack(Native_Error* new_error, Native_Error* old_error, ErrorSeverity severity) {
|
||||
// push_allocator(GPAllocator());
|
||||
|
||||
auto error_message = format_string("%s\n > %s", new_error->data, old_error->data).data;
|
||||
|
||||
Cleanup_Error(new_error);
|
||||
Cleanup_Error(old_error);
|
||||
|
||||
Native_Error* error_merged = New<Native_Error>(false);
|
||||
error_merged->data = (u8*)error_message;
|
||||
error_merged->count = strlen((char*)error_merged->data);
|
||||
error_merged->severity = severity;
|
||||
|
||||
return error_merged;
|
||||
}
|
||||
|
||||
Native_Error* Native_Error_Test() {
|
||||
// This is quite verbose, but w/e
|
||||
auto old_error = New_Error("Original error...");
|
||||
auto new_message = format_string("Failed to start stream. Error Code: %d", -1).data;
|
||||
auto new_error = New_Error(new_message);
|
||||
GPAllocator_Delete(new_message);
|
||||
|
||||
return Native_Error_Callstack(new_error, old_error, SEVERITY_NON_FATAL);
|
||||
}
|
||||
|
||||
Native_Error* New_Error_Internal(char* format, ...) {
|
||||
va_list args;
|
||||
va_start(args, format);
|
||||
auto error = Create_New_Native_Error_Internal(format, args);
|
||||
va_end(args);
|
||||
|
||||
error->severity = SEVERITY_NON_FATAL;
|
||||
#if BUILD_DEBUG && ALWAYS_PRINT_ERROR_MESSAGES
|
||||
printf("[ERROR (NON-FATAL)] %.*s\n", (s32)error->count, (char*)error->data);
|
||||
#endif
|
||||
#if BREAK_ON_ERRORS
|
||||
debug_break();
|
||||
#endif
|
||||
|
||||
return error;
|
||||
}
|
||||
|
||||
Native_Error* New_Warning_Internal(char* format, ...) {
|
||||
va_list args;
|
||||
va_start(args, format);
|
||||
auto error = Create_New_Native_Error_Internal(format, args);
|
||||
va_end(args);
|
||||
|
||||
error->severity = SEVERITY_WARNING;
|
||||
#if BUILD_DEBUG && ALWAYS_PRINT_ERROR_MESSAGES
|
||||
printf("[WARNING] %.*s\n", (s32)error->count, (char*)error->data);
|
||||
#endif
|
||||
#if BREAK_ON_WARNINGS
|
||||
debug_break();
|
||||
#endif
|
||||
|
||||
return error;
|
||||
}
|
||||
|
||||
Native_Error* Cleanup_Error(Native_Error* error) {
|
||||
if (error == nullptr) return nullptr;
|
||||
|
||||
GPAllocator_Delete(error->data);
|
||||
GPAllocator_Delete(error);
|
||||
|
||||
return nullptr;
|
||||
}
|
||||
lib/Base/ErrorType.cpp
@ -0,0 +1,30 @@
// #NOTE: To keep things simple, all allocations for Error should be via GPAllocator.
// We really allocate two things: the Error struct and the error string copy.

enum class ErrorClass: s32 {
    NONE    = 0, // should not be used; exists only so an unset severity is detectable.
    WARNING = 1,
    ERROR   = 2,
    FATAL   = 3
};

// #downcasts to string
struct Error {
    s64 count;
    u8* data;
    ErrorClass severity = ErrorClass::NONE;
    Error* previous_error; // if we're passing errors up the callstack.

    Arena* arena;
};

string to_string (Error error) {
    return { error.count, error.data };
}

// Will need to use __FILE__ and __LINE__ macros

// Error* new_error            (string error_message, ErrorClass severity, Error* previous_error=nullptr);
// Error* append_error         (Error* old_error, Error* new_error);
// void   context_report_error (Error* error);
// void   cleanup_error        (Error* error);
lib/Base/Expandable_Arena.cpp
@ -0,0 +1,122 @@
|
||||
ExpandableArena* expandable_arena_new (Arena_Reserve starting_reserve, s32 commit_page_count) {
|
||||
ExpandableArena* new_arena = (ExpandableArena*)bootstrap_arena(starting_reserve, commit_page_count);
|
||||
// Note: bootstrap_arena only initializes the first 32 bytes, so the rest of ExpandableArena is initialized here:
|
||||
new_arena->current = (Arena*)new_arena;
|
||||
|
||||
new_arena->current_point = expandable_arena_start(new_arena);
|
||||
new_arena->next_arenas = Array<Arena*>(); // next_arenas will be uninitialized, so we have to do this
|
||||
// Back `next_arenas` with the general purpose allocator: if this arena is reset, anything stored in
// it is lost, and we don't want to tie the lifetime of `next_arenas` to this expandable arena.
|
||||
new_arena->next_arenas.allocator = GPAllocator();
|
||||
|
||||
array_reserve(new_arena->next_arenas, 8);
|
||||
|
||||
return new_arena;
|
||||
}
|
||||
|
||||
void* expandable_arena_allocator_proc (Allocator_Mode mode, s64 requested_size, s64 old_size, void* old_memory, void* allocator_data) {
|
||||
ExpandableArena* arena = (ExpandableArena*)allocator_data;
|
||||
Assert(arena != nullptr);
|
||||
switch (mode) {
|
||||
case Allocator_Mode::ALLOCATE: {
|
||||
return expandable_arena_alloc(arena, requested_size);
|
||||
} break;
|
||||
case Allocator_Mode::RESIZE: {
|
||||
// See note :ArenaResizing
|
||||
void* new_memory = expandable_arena_alloc(arena, requested_size);
|
||||
memcpy(new_memory, old_memory, old_size);
|
||||
return new_memory;
|
||||
} break;
|
||||
case Allocator_Mode::DEALLOCATE:
|
||||
return nullptr;
|
||||
break;
|
||||
}
|
||||
|
||||
return nullptr;
|
||||
}
|
||||
|
||||
bool is_valid (ExpandableArena* arena) {
|
||||
return (arena != nullptr)
|
||||
&& (arena->memory_base != nullptr)
|
||||
&& (arena->current != nullptr);
|
||||
}
|
||||
|
||||
void* expandable_arena_alloc (ExpandableArena* arena_ex, s64 byte_count) {
|
||||
Assert(arena_ex != nullptr);
|
||||
Assert(arena_ex->memory_base != nullptr); // must be initialized before calling.
|
||||
Assert(is_valid(arena_ex));
|
||||
Assert(arena_table->initialized);
|
||||
|
||||
Arena* arena = (Arena*)arena_ex->current;
|
||||
|
||||
u8* result = Align<u8*>(arena->current_point, arena->alignment);
|
||||
u8* result_end = result + byte_count;
|
||||
|
||||
if (result_end > arena->first_uncommitted_page) {
|
||||
if (result_end > arena_address_limit(arena)) {
|
||||
// Pick an appropriate reserve size that will fit this allocation.
|
||||
Arena_Reserve new_min_reserve = next_reserve_size(byte_count);
|
||||
if (arena->reserve_size > new_min_reserve) {
|
||||
new_min_reserve = arena->reserve_size;
|
||||
}
|
||||
|
||||
Arena* new_arena = next_arena(new_min_reserve);
|
||||
|
||||
new_arena->alignment = arena_ex->alignment;
|
||||
new_arena->flags = arena_ex->flags;
|
||||
|
||||
arena_ex->current = new_arena;
|
||||
array_add(arena_ex->next_arenas, new_arena);
|
||||
|
||||
// Allocate from new arena, committing pages as required
|
||||
result = Align<u8*>(new_arena->current_point, new_arena->alignment);
|
||||
result_end = result + byte_count;
|
||||
|
||||
if (result_end > arena_address_limit(new_arena)) {
|
||||
extend_committed_pages(new_arena, result_end);
|
||||
}
|
||||
} else {
|
||||
extend_committed_pages(arena, result_end);
|
||||
}
|
||||
}
|
||||
|
||||
arena_ex->current->current_point = result_end;
|
||||
|
||||
return result;
|
||||
}
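// Growth-path summary for expandable_arena_alloc above (matches the branches in the function):
//   1. The request fits inside already-committed pages     -> just bump current_point.
//   2. It fits the reservation but not the committed pages -> extend_committed_pages on the current arena.
//   3. It exceeds the reservation                          -> reserve a new fixed Arena (at least as big as
//      reserve_size or the request), inherit alignment/flags, record it in next_arenas, and allocate there.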
|
||||
|
||||
u8* expandable_arena_start (ExpandableArena* arena_ex) {
|
||||
return Align(arena_ex->memory_base + sizeof(ExpandableArena), ARENA_DEFAULT_ALIGNMENT);
|
||||
}
|
||||
|
||||
Allocator get_allocator (ExpandableArena* arena_ex) {
|
||||
return { expandable_arena_allocator_proc, arena_ex };
|
||||
}
|
||||
|
||||
void arena_reset (ExpandableArena* arena_ex, bool free_extra_pages) {
|
||||
if (!is_valid(arena_ex)) return;
|
||||
|
||||
// Free arenas in `next_arenas`
|
||||
for (s64 i = 0; i < arena_ex->next_arenas.count; i += 1) {
|
||||
release_arena(arena_ex->next_arenas[i], free_extra_pages);
|
||||
}
|
||||
|
||||
// Reset next_arenas
|
||||
#if BUILD_DEBUG
|
||||
array_zero(arena_ex->next_arenas);
|
||||
#endif
|
||||
array_reset_keeping_memory(arena_ex->next_arenas);
|
||||
|
||||
arena_ex->current = (Arena*)arena_ex;
|
||||
arena_ex->current_point = expandable_arena_start(arena_ex);
|
||||
|
||||
if (free_extra_pages) {
|
||||
free_pages_down_to((Arena*)arena_ex, arena_ex->initial_commit_page_count);
|
||||
}
|
||||
}
|
||||
|
||||
force_inline void arena_delete (ExpandableArena* arena_ex) {
|
||||
array_free(arena_ex->next_arenas);
|
||||
arena_reset(arena_ex, true);
|
||||
arena_delete((Arena*)arena_ex);
|
||||
}
|
||||
lib/Base/Expandable_Arena.h
@ -0,0 +1,28 @@
// Lets us start with a small arena and grow it on an as-needed basis, so that allocations stay
// extremely fast. The full structure is 80 bytes, and because we use bootstrapping, we only need a
// little extra memory for storing the `next_arenas` pointers.

// DO NOT MERGE WITH `Arena`, we need fixed size arenas so that we can back
// `ArenaArray`s.
struct ExpandableArena {
    u8* current_point          = nullptr;
    u8* memory_base            = nullptr;
    u8* first_uncommitted_page = nullptr;
    u16 alignment              = CPU_REGISTER_WIDTH_BYTES;
    Arena_Reserve reserve_size = Arena_Reserve::Size_64K;
    Arena_Flags   flags        = Arena_Flags::None;
    u32 initial_commit_page_count = ARENA_DEFAULT_COMMIT_PAGE_COUNT;
    // Note that this downcasts to Arena, so can be initialized in the same way.
    Arena* current;
    Array<Arena*> next_arenas;
};

ExpandableArena* expandable_arena_new            (Arena_Reserve starting_reserve=Arena_Reserve::Size_64K, s32 commit_page_count=8);
void*            expandable_arena_allocator_proc (Allocator_Mode mode, s64 requested_size, s64 old_size, void* old_memory, void* allocator_data);
bool             is_valid                        (ExpandableArena* arena);
void*            expandable_arena_alloc          (ExpandableArena* arena_ex, s64 byte_count);
u8*              expandable_arena_start          (ExpandableArena* arena_ex);
Allocator        get_allocator                   (ExpandableArena* arena_ex);
void             arena_reset                     (ExpandableArena* arena_ex, bool free_extra_pages=true);
force_inline void arena_delete                   (ExpandableArena* arena_ex);
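// Minimal usage sketch for ExpandableArena (assumes the scope-based push_allocator helper used
// elsewhere in this layer; Thread_Context is just a stand-in payload):
//
//     ExpandableArena* arena_ex = expandable_arena_new(Arena_Reserve::Size_64K, 8);
//     { push_allocator(get_allocator(arena_ex));
//         auto scratch = New<Thread_Context>();  // served from the expandable arena
//     }
//     arena_reset(arena_ex);                     // recycle everything, keeping the first reservation
//     arena_delete(arena_ex);                    // or release it entirely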
@ -1,7 +1,7 @@
|
||||
#include <string.h>
|
||||
|
||||
#if GP_ALLOCATOR_TRACK_ALLOCATIONS
|
||||
#include <mutex>
|
||||
#include <mutex> // #TODO: Replace with Mutex (see OS_Win32.cpp)
|
||||
global General_Allocator gAllocator; // @Shared
|
||||
global std::mutex allocator_mutex;
|
||||
#endif
|
||||
@ -35,11 +35,11 @@ General_Allocator* get_general_allocator_data() {
|
||||
|
||||
constexpr s64 Allocation_Tracking_Is_Enabled = GP_ALLOCATOR_TRACK_ALLOCATIONS;
|
||||
|
||||
bool GPAllocator_Tracking_Enabled() {
|
||||
bool GPAllocator_Tracking_Enabled () {
|
||||
return Allocation_Tracking_Is_Enabled != 0;
|
||||
}
|
||||
|
||||
void GPAllocator_Initialize_Allocation_Tracker() {
|
||||
void GPAllocator_Initialize_Allocation_Tracker () {
|
||||
#if GP_ALLOCATOR_TRACK_ALLOCATIONS
|
||||
constexpr s64 alignment = 64;
|
||||
s64 item_count_max = 64 * 4096;
|
||||
@ -50,7 +50,7 @@ void GPAllocator_Initialize_Allocation_Tracker() {
|
||||
#endif
|
||||
}
|
||||
|
||||
bool GPAllocator_Is_This_Yours(void* old_memory) {
|
||||
bool GPAllocator_Is_This_Yours (void* old_memory) {
|
||||
#if GP_ALLOCATOR_TRACK_ALLOCATIONS
|
||||
std::lock_guard<std::mutex> lock(allocator_mutex);
|
||||
|
||||
@ -101,8 +101,11 @@ void* GPAllocator_New (s64 new_size, s64 alignment, bool initialize) {
|
||||
|
||||
auto memory = Aligned_Alloc(new_size, alignment);
|
||||
// _aligned_malloc does not zero memory, so we can zero it here
|
||||
if (initialize && memory) { memset(memory, 0, new_size); }
|
||||
if (initialize && memory) { memset(memory, ALLOCATOR_INIT_VALUE, new_size); }
|
||||
Add_Allocation(new_size, memory, (s32)alignment);
|
||||
|
||||
// printf("[GP] Allocating memory %p of size %llu\n", memory, new_size);
|
||||
|
||||
return memory;
|
||||
}
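// Illustrative effect of the ALLOCATOR_INIT_VALUE change above: when it is defined as a non-zero
// poison byte in debug builds, freshly allocated memory reads back as an obviously bogus pattern
// instead of a plausible zero, so "used before initialized" bugs stand out. Sketch:
//
//     f64* values = (f64*)GPAllocator_New(8 * sizeof(f64));
//     // values[0] is now a garbage-looking poison value until it is explicitly written.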
|
||||
|
||||
@ -117,10 +120,13 @@ void* GPAllocator_Resize (s64 old_size, void* old_memory, s64 new_size, s64 alig
|
||||
auto new_memory_address = Aligned_Realloc(old_size, old_memory, new_size, alignment);
|
||||
|
||||
if (initialize && new_memory_address && new_size > old_size) {
|
||||
memset((u8*)new_memory_address + old_size, 0, new_size - old_size);
|
||||
memset((u8*)new_memory_address + old_size, ALLOCATOR_INIT_VALUE, new_size - old_size);
|
||||
}
|
||||
Remove_Allocation(old_memory);
|
||||
Add_Allocation(new_size, new_memory_address, (s32)alignment);
|
||||
|
||||
// printf("[GP] Rellocating memory %p of size %llu\n", new_memory_address, new_size);
|
||||
|
||||
return new_memory_address;
|
||||
}
|
||||
|
||||
@ -128,9 +134,11 @@ void GPAllocator_Delete (void* memory) {
|
||||
if (memory == nullptr) return;
|
||||
Aligned_Free(memory);
|
||||
Remove_Allocation(memory);
|
||||
|
||||
// printf("[GP] Deleting memory %p\n", memory);
|
||||
}
|
||||
|
||||
Allocator GPAllocator() {
|
||||
Allocator GPAllocator () {
|
||||
return { GPAllocator_Proc, nullptr };
|
||||
}
|
||||
|
||||
|
||||
@ -10,9 +10,12 @@
|
||||
#define Aligned_Realloc(old_sz, ptr, sz, align) _aligned_realloc_dbg(ptr, sz, align, __FILE__, __LINE__)
|
||||
#define Aligned_Free(ptr) _aligned_free_dbg(ptr)
|
||||
#else
|
||||
#define Aligned_Alloc(sz, align) _aligned_malloc(sz, align)
|
||||
#define Aligned_Realloc(old_sz, ptr, sz, align) _aligned_realloc(ptr, sz, align)
|
||||
#define Aligned_Free(ptr) _aligned_free(ptr)
|
||||
#define Aligned_Alloc(sz, align) std::malloc(sz)//_aligned_malloc(sz, align)
|
||||
#define Aligned_Realloc(old_sz, ptr, sz, align) std::realloc(ptr, sz)//_aligned_realloc(ptr, sz, align)
|
||||
#define Aligned_Free(ptr) std::free(ptr)//_aligned_free(ptr)
|
||||
// #define Aligned_Alloc(sz, align) _aligned_malloc(sz, align)
|
||||
// #define Aligned_Realloc(old_sz, ptr, sz, align) _aligned_realloc(ptr, sz, align)
|
||||
// #define Aligned_Free(ptr) _aligned_free(ptr)
|
||||
#endif
|
||||
#else // Non-MSVC (POSIX / GCC / Clang)
|
||||
#include <cstdlib> // std::aligned_alloc
|
||||
@ -20,7 +23,6 @@
|
||||
#define Aligned_Realloc(old_sz, ptr, sz, align) gp_aligned_realloc(old_sz, ptr, sz, align)
|
||||
#define Aligned_Free(ptr) std::free(ptr)
|
||||
#endif
|
||||
|
||||
struct Allocation {
|
||||
s64 size;
|
||||
void* memory;
|
||||
@ -40,7 +42,7 @@ General_Allocator* get_general_allocator_data();
|
||||
|
||||
constexpr u16 GPAllocator_Default_Alignment = 16;
|
||||
|
||||
Allocator GPAllocator();
|
||||
Allocator GPAllocator ();
|
||||
|
||||
void* GPAllocator_Proc (Allocator_Mode mode, s64 requested_size, s64 old_size, void* old_memory, void* allocator_data);
|
||||
|
||||
@ -48,7 +50,7 @@ void* GPAllocator_New (s64 new_size, s64 alignment=16, bool initialize=true);
|
||||
void* GPAllocator_Resize (s64 old_size, void* old_memory, s64 new_size, s64 alignment=16, bool initialize=true);
|
||||
void GPAllocator_Delete (void* memory);
|
||||
|
||||
bool GPAllocator_Is_This_Yours(void* old_memory);
|
||||
void GPAllocator_Initialize_Allocation_Tracker();
|
||||
bool GPAllocator_Tracking_Enabled();
|
||||
bool GPAllocator_Is_This_Yours (void* old_memory);
|
||||
void GPAllocator_Initialize_Allocation_Tracker ();
|
||||
bool GPAllocator_Tracking_Enabled ();
|
||||
|
||||
|
||||
lib/Base/Logger.cpp
@ -0,0 +1,27 @@
void log (string fmt, ...) {
    String_Builder* sb = get_thread_context()->log_builder;

    va_list args;
    va_start(args, fmt);
    print_to_builder(sb, fmt, args);
    va_end(args);

    // Append a newline if needed
    string result = string_view(sb);
    bool ends_with_newline = (result.count > 0 && result.data[result.count-1] == '\n');
    if (!ends_with_newline) {
        append(sb, "\n");
    }

    string message = string_view(sb);

    Logger* logger = get_context_logger();
    logger->proc(message, Log_Level::None, logger->data);

    reset_string_builder(sb);
}

void print (string message) {
    Logger* logger = get_context_logger();
    logger->proc(message, Log_Level::None, logger->data);
}
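// Usage sketch (the format string follows the printf conventions used by print_to_builder;
// entry_count and file_name are illustrative):
//
//     log("Loaded %d entries from %s", entry_count, file_name);   // newline appended automatically
//     print("raw message, passed to the logger as-is\n");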
lib/Base/Logger.h
@ -0,0 +1,75 @@
|
||||
// #TODO #Logger module
|
||||
// [ ] Add colored prints (See: Print_Color.jai)
|
||||
|
||||
// See Logger.jai in our jiim-dev-gui project for how to do fancy colored text.
|
||||
enum class Log_Level : s32 {
|
||||
TODO = -2,
|
||||
Trace = -1,
|
||||
None = 0,
|
||||
Info = 1,
|
||||
Warning = 2,
|
||||
Error = 3,
|
||||
Fatal_Error = 4,
|
||||
};
|
||||
|
||||
// log_function pointer
|
||||
typedef void (*Logger_Proc)(string log_message, Log_Level level, void* data);
|
||||
void default_logger_proc (string log_message, Log_Level level, void* data); //fwd declared
|
||||
|
||||
struct Logger {
|
||||
Logger_Proc proc;
|
||||
void* data;
|
||||
};
|
||||
|
||||
struct Default_Logger {
|
||||
Logger_Proc proc = default_logger_proc;
|
||||
String_Builder* sb = nullptr;
|
||||
|
||||
Mutex* mutex;
|
||||
#if OS_WINDOWS
|
||||
void* windows_standard_output;
|
||||
void* windows_standard_error;
|
||||
#endif
|
||||
};
|
||||
|
||||
global Default_Logger default_logger;
|
||||
|
||||
// default_logger_proc()...
|
||||
#if OS_WINDOWS
|
||||
void os_write_string_unsynchronized(string s, bool to_standard_error) {
|
||||
u32 written = 0;
|
||||
void* handle = (to_standard_error) ?
|
||||
default_logger.windows_standard_error :
|
||||
default_logger.windows_standard_output;
|
||||
Assert(handle != nullptr);
|
||||
bool result = (bool)WriteFile(handle, s.data, (u32)s.count, (LPDWORD)&written, nullptr);
|
||||
}
|
||||
#endif
|
||||
|
||||
void default_logger_proc (string log_message, Log_Level level, void* data) {
|
||||
bool to_standard_error = level == Log_Level::Error || level == Log_Level::Fatal_Error;
|
||||
|
||||
lock(default_logger.mutex);
|
||||
os_write_string_unsynchronized(log_message, to_standard_error);
|
||||
unlock(default_logger.mutex);
|
||||
}
|
||||
|
||||
void default_logger_initialize() {
|
||||
// see: Bootstrap_Main_Thread_Context
|
||||
default_logger.mutex = New<Mutex>(true);
|
||||
mutex_init(default_logger.mutex);
|
||||
|
||||
#if OS_WINDOWS
|
||||
default_logger.windows_standard_output = GetStdHandle(STD_OUTPUT_HANDLE);
|
||||
default_logger.windows_standard_error = GetStdHandle(STD_ERROR_HANDLE);
|
||||
#endif
|
||||
}
|
||||
|
||||
// more hacky forward declares
|
||||
Logger* get_context_logger ();
|
||||
Allocator get_temp_allocator ();
|
||||
Allocator get_context_allocator ();
|
||||
|
||||
void log (string fmt, ...);
|
||||
void print (string message);
|
||||
|
||||
@ -1,127 +1,87 @@
|
||||
// #TODO: Integrate Allocator / context.allocator
|
||||
|
||||
|
||||
// Need to sort out how formatted strings and string builders are allocated
|
||||
// Maybe just use context.allocator?
|
||||
// What about temp strings? use context.temp?
|
||||
|
||||
struct string {
|
||||
s64 count;
|
||||
u8* data;
|
||||
// Construct from a string literal or C-string
|
||||
string () { // default constructor
|
||||
count = 0;
|
||||
data = nullptr;
|
||||
}
|
||||
|
||||
string (char* cstr) {
|
||||
count = strlen(cstr);
|
||||
data = (u8*)cstr;
|
||||
}
|
||||
|
||||
string (s64 _count, char* str) { count = _count; data = (u8*)str; }
|
||||
string (s64 _count, u8* str) { count = _count; data = str; }
|
||||
};
|
||||
|
||||
// ~ API ~ #TODO
|
||||
string copy_string (string str);
|
||||
bool strings_match(string first_string, string second_string);
|
||||
|
||||
// Unicode stuff
|
||||
string wide_to_utf8 (u16* source, s32 length);
|
||||
|
||||
// string string_view(string n_string, int start_index, int view_count);
|
||||
// string copy_string(char* c_string);
|
||||
// void free(string& n_string);
|
||||
|
||||
bool is_valid(string n_string);
|
||||
bool is_c_string(string n_string);
|
||||
|
||||
char* to_c_string(string n_string);
|
||||
|
||||
string format_string(char* format, ...);
|
||||
string string_from_literal(char* literal);
|
||||
|
||||
#include "General_Purpose_Allocator.h"
|
||||
#include <stdio.h> // vsnprintf
|
||||
#include <cstdarg> // va_list, ...
|
||||
|
||||
bool is_c_string(string n_string) {
|
||||
return (n_string.data && n_string.data[n_string.count] == '\0');
|
||||
// #TODO #string module
|
||||
// [ ] I'm debating if string type should automatically null-terminate.
|
||||
// I personally do not like it, and think we should temp-copy c-strings as they're needed.
|
||||
bool is_valid (string s) {
|
||||
return (s.data != nullptr && s.count > 0);
|
||||
}
|
||||
|
||||
bool is_valid(string n_string) {
|
||||
return (n_string.data != nullptr && n_string.count > 0);
|
||||
bool is_c_string (string s) {
|
||||
return (s.data && s.data[s.count] == '\0');
|
||||
}
|
||||
|
||||
string copy_string (string str) {
|
||||
string new_str = {};
|
||||
u8* to_c_string (string s) {
|
||||
u8* result = (u8*)internal_alloc(s.count + 1);
|
||||
|
||||
new_str.count = str.count;
|
||||
new_str.data = (u8*)internal_alloc(str.count);
|
||||
memcpy(result, s.data, s.count);
|
||||
result[s.count] = '\0';
|
||||
|
||||
memcpy(new_str.data, str.data, str.count);
|
||||
|
||||
return new_str;
|
||||
return result;
|
||||
}
|
||||
|
||||
string format_string (char* format, ...) {
|
||||
constexpr s64 BUFFER_SIZE = 4096;
|
||||
string copy_string (string s) {
|
||||
Assert(s.count > 0);
|
||||
if (s.count <= 0)
|
||||
return "";
|
||||
string str = {};
|
||||
|
||||
string str = {0};
|
||||
str.count = s.count;
|
||||
str.data = (u8*)internal_alloc(s.count + 1);
|
||||
|
||||
str.data = NewArray<u8>(BUFFER_SIZE);
|
||||
memcpy(str.data, s.data, s.count);
|
||||
|
||||
str.data[str.count] = '\0'; // null-terminate for backwards compatibility?
|
||||
|
||||
va_list args;
|
||||
va_start(args, format);
|
||||
// Note that this *is* null-terminated for compatibility.
|
||||
str.count = (s64)vsnprintf((char*)str.data, (size_t)BUFFER_SIZE, format, args);
|
||||
va_end(args);
|
||||
|
||||
return str;
|
||||
}
|
||||
|
||||
string copy_string(char* c_string) {
|
||||
string copy_string (char* c_string) {
|
||||
string str = {0};
|
||||
s64 string_length = strlen(c_string);
|
||||
if (string_length == 0)
|
||||
return "";
|
||||
|
||||
str.data = NewArray<u8>(string_length + 1);
|
||||
memcpy(str.data, c_string, string_length);
|
||||
str.count = string_length;
|
||||
|
||||
str.data[str.count] = '\0'; // null-terminate for backwards compatibility?
|
||||
|
||||
return str;
|
||||
}
|
||||
|
||||
bool strings_match(string first_string, string second_string) {
|
||||
if (first_string.count != second_string.count) {
|
||||
return false;
|
||||
string to_string (ArrayView<u8> str) {
|
||||
return {str.count, str.data};
|
||||
}
|
||||
|
||||
void string_free (string& s) {
|
||||
internal_free(s.data);
|
||||
|
||||
s.data = nullptr;
|
||||
s.count = 0;
|
||||
}
|
||||
|
||||
force_inline string string_view (string s, s64 start_index, s64 view_count) {
|
||||
Assert(view_count >= 0); Assert(start_index >= 0);
|
||||
if (view_count < 0 || start_index < 0 || start_index >= s.count) return "";
|
||||
|
||||
s64 new_count = view_count;
|
||||
if (start_index + view_count > s.count) {
|
||||
new_count = s.count - start_index;
|
||||
}
|
||||
|
||||
for (s64 i = 0; i < first_string.count; i += 1) {
|
||||
if (first_string.data[i] != second_string.data[i]) {
|
||||
return false;
|
||||
}
|
||||
}
|
||||
|
||||
return true;
|
||||
|
||||
return { new_count, s.data + start_index };
|
||||
}
|
||||
|
||||
string string_from_literal(char* literal) {
|
||||
string new_string;
|
||||
new_string.count = strlen(literal);
|
||||
new_string.data = (u8*) literal;
|
||||
|
||||
return new_string;
|
||||
string copy_string_view (string s, s64 start_index, s64 view_count) {
|
||||
// maybe redundant...
|
||||
return copy_string(string_view(s, start_index, view_count));
|
||||
}
|
||||
|
||||
void free(string& n_string) {
|
||||
internal_free(n_string.data);
|
||||
|
||||
n_string.data = nullptr;
|
||||
n_string.count = 0;
|
||||
bool strings_match (string first_string, string second_string) {
|
||||
return (first_string == second_string);
|
||||
}
|
||||
|
||||
// Unicode nonsense
|
||||
// #Unicode
|
||||
string wide_to_utf8 (u16* source, s32 length) {
|
||||
if (length == 0) return { };
|
||||
|
||||
@ -149,3 +109,108 @@ string wide_to_utf8 (u16* source, s32 length) {
|
||||
return utf8_string;
|
||||
}
|
||||
|
||||
string format_string (char* format, ...) {
|
||||
constexpr s64 BUFFER_SIZE = 4096;
|
||||
|
||||
string str = {0};
|
||||
|
||||
str.data = NewArray<u8>(BUFFER_SIZE);
|
||||
|
||||
va_list args;
|
||||
va_start(args, format);
|
||||
// Note that this *is* null-terminated for compatibility.
|
||||
str.count = (s64)vsnprintf((char*)str.data, (size_t)BUFFER_SIZE, format, args);
|
||||
va_end(args);
|
||||
|
||||
return str;
|
||||
}
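// format_string usage sketch (the 4096-byte buffer above bounds the formatted result; the
// arguments are illustrative):
//
//     string label = format_string("run %d of %d", current_run, total_runs);
//     print(label);   // the buffer is null-terminated, so it is also safe to hand to C APIs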
|
||||
|
||||
force_inline String_Builder* new_string_builder (Arena_Reserve new_reserve) {
|
||||
return arena_array_new<u8>(1, new_reserve);
|
||||
}
|
||||
|
||||
force_inline void append (String_Builder* sb, string s) {
|
||||
array_add(*sb, ArrayView<u8>(s.count, s.data));
|
||||
}
|
||||
|
||||
void append (String_Builder* sb, ArrayView<string> strings) {
|
||||
s64 combined_length = 0;
|
||||
for (s64 i = 0; i < strings.count; i += 1) {
|
||||
combined_length += strings[i].count;
|
||||
}
|
||||
|
||||
s64 final_length = sb->count + combined_length;
|
||||
|
||||
if (sb->allocated < final_length) {
|
||||
array_reserve(*sb, final_length);
|
||||
}
|
||||
|
||||
for (s64 i = 0; i < strings.count; i += 1) {
|
||||
string s = strings[i];
|
||||
array_add(*sb, ArrayView<u8>(s.count, s.data));
|
||||
}
|
||||
}
|
||||
|
||||
force_inline void append_no_add (String_Builder* sb, string s) {
|
||||
array_add(*sb, ArrayView<u8>(s.count, s.data));
|
||||
sb->count -= s.count;
|
||||
}
|
||||
|
||||
// Unfortunately this follows the printf format, which is annoying.
|
||||
// I'd rather have something like fmt::
|
||||
void print_to_builder (String_Builder* sb, string format, va_list args) {
|
||||
s64 expected_final_count = sb->count + format.count + 4096;
|
||||
|
||||
if (sb->allocated < expected_final_count) {
|
||||
array_reserve(*sb, expected_final_count);
|
||||
}
|
||||
|
||||
s64 buffer_size = sb->allocated - sb->count; // available space
|
||||
u8* current_point = &sb->data[sb->count];
|
||||
|
||||
s64 print_count = (s64)vsnprintf((char*)current_point, (size_t)buffer_size, (char*)format.data, args);
|
||||
|
||||
sb->count += print_count;
|
||||
}
|
||||
|
||||
void print_to_builder (String_Builder* sb, string format, ...) {
|
||||
s64 expected_final_count = sb->count + format.count + 4096;
|
||||
|
||||
if (sb->allocated < expected_final_count) {
|
||||
array_reserve(*sb, expected_final_count);
|
||||
}
|
||||
|
||||
s64 buffer_size = sb->allocated - sb->count; // available space
|
||||
u8* current_point = &sb->data[sb->count];
|
||||
|
||||
va_list args;
|
||||
va_start(args, format);
|
||||
s64 print_count = (s64)vsnprintf((char*)current_point, (size_t)buffer_size, (char*)format.data, args);
|
||||
va_end(args);
|
||||
|
||||
sb->count += print_count;
|
||||
}
|
||||
|
||||
string string_view (String_Builder* sb) {
|
||||
// Make sure the final byte is null-terminated so the view can also be handed to C APIs:
|
||||
append_no_add(sb, "\0"); // doesn't increment sb.count
|
||||
return to_string(to_view(*sb));
|
||||
}
|
||||
|
||||
// for when we want to keep the string builder around and recycle the memory.
|
||||
internal force_inline void reset_string_builder (String_Builder* sb) {
|
||||
poison_range(*sb, 0, sb->count);
|
||||
reset_keeping_memory(*sb);
|
||||
}
|
||||
|
||||
force_inline string builder_to_string (String_Builder* sb) {
|
||||
string final_string = copy_string(to_string(to_view(*sb)));
|
||||
|
||||
free_string_builder(sb);
|
||||
|
||||
return final_string;
|
||||
}
|
||||
|
||||
internal force_inline void free_string_builder (String_Builder* sb) {
|
||||
arena_array_free(*sb);
|
||||
}
|
||||
|
||||
lib/Base/String.h
@ -0,0 +1,102 @@
|
||||
#pragma once
|
||||
// #TODO: #strings:
|
||||
// [ ] Always null-terminate strings!
|
||||
// [ ] How do I accept variadic arguments of any type to my print function?
|
||||
// [ ] Need to sort out how formatted strings and string builders are allocated
|
||||
// [ ] Separate functions for temp alloc (tprint??)
|
||||
// [ ] API needs to be completely overhauled
|
||||
// [ ] I should also put path manipulation here or in a separate file.
|
||||
|
||||
struct string {
|
||||
s64 count;
|
||||
u8* data;
|
||||
// Construct from a string literal or C-string
|
||||
string () { // default constructor
|
||||
count = 0;
|
||||
data = nullptr;
|
||||
}
|
||||
|
||||
string (char* cstr) {
|
||||
count = strlen(cstr);
|
||||
data = (u8*)cstr;
|
||||
}
|
||||
|
||||
string (s64 _count, char* str) { count = _count; data = (u8*)str; }
|
||||
string (s64 _count, u8* str) { count = _count; data = str; }
|
||||
|
||||
bool operator==(const string& other) const {
|
||||
string first_string = *this;
|
||||
string second_string = other;
|
||||
// return strings_match(*this, other);
|
||||
if (first_string.count != second_string.count) {
|
||||
return false;
|
||||
}
|
||||
|
||||
for (s64 i = 0; i < first_string.count; i += 1) {
|
||||
if (first_string.data[i] != second_string.data[i]) {
|
||||
return false;
|
||||
}
|
||||
}
|
||||
|
||||
return true;
|
||||
}
|
||||
};
|
||||
|
||||
struct wstring {
|
||||
s64 count;
|
||||
u16* data;
|
||||
|
||||
wstring () { // default constructor
|
||||
count = 0;
|
||||
data = nullptr;
|
||||
}
|
||||
};
|
||||
|
||||
// ~Keep these API
|
||||
bool is_valid (string s);
|
||||
bool is_c_string (string s);
|
||||
u8* to_c_string (string s); // #allocates
|
||||
string copy_string (string s); // #allocates, returned string is #null-terminated.
|
||||
string copy_string (char* c_string); // #allocates, returned string is #null-terminated.
|
||||
string to_string (ArrayView<u8> str);
|
||||
void free(string& s);
|
||||
|
||||
// String manipulation & comparison
|
||||
force_inline string string_view (string s, s64 start_index, s64 view_count);
|
||||
string copy_string_view (string s, s64 start_index, s64 view_count);
|
||||
bool strings_match (string first_string, string second_string);
|
||||
|
||||
// #Unicode
|
||||
string wide_to_utf8 (u16* source, s32 length);
|
||||
// wstring utf8_to_wide (string source); TODO.
|
||||
|
||||
string format_string (char* format, ...);
|
||||
|
||||
// Parsing stuff:
|
||||
// is_white_space(char: u8)
|
||||
// advance
|
||||
// eat_spaces
|
||||
|
||||
// Print stuff
|
||||
// s64 string_to_int (string v, s32 base = 10, s64* remainder=nullptr);
|
||||
//
|
||||
|
||||
// #string_builder
|
||||
// #limitations This won't be as fast as Jon's String_Builder in jai because we back it with an
// Arena, which costs a variable number of cycles depending on whether the process already has the
// memory committed. It also has a maximum capacity determined by which Arena_Reserve we choose.
|
||||
// That being said, the implementation is much simpler.
|
||||
typedef ArenaArray<u8> String_Builder; // struct String_Builder
|
||||
|
||||
force_inline String_Builder* new_string_builder (Arena_Reserve new_reserve=Arena_Reserve::Size_64K);
|
||||
force_inline void append (String_Builder* sb, string s);
|
||||
void append (String_Builder* sb, ArrayView<string> strings);
|
||||
internal force_inline void append_no_add (String_Builder* sb, string s); // for appending null terminators, does not increment count.
|
||||
void print_to_builder (String_Builder* sb, string format, ...);
|
||||
void print_to_builder (String_Builder* sb, string format, va_list args);
|
||||
string string_view (String_Builder* sb);
|
||||
internal force_inline void reset_string_builder (String_Builder* sb);
|
||||
|
||||
force_inline string builder_to_string (String_Builder* sb); // copies the contents into a new string and frees the builder
|
||||
internal force_inline void free_string_builder (String_Builder* sb);
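// String_Builder usage sketch (helpers as declared above):
//
//     String_Builder* sb = new_string_builder();
//     append(sb, "hello ");
//     print_to_builder(sb, "%s (%d)", "world", 42);
//     string result = builder_to_string(sb);   // copies the contents out and frees the builder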
|
||||
|
||||
@ -1,4 +1,6 @@
|
||||
// Thread_Group Internal Procedures
|
||||
// #NOTE: There is no logging in this implementation!
|
||||
|
||||
void init(Work_List* list) {
|
||||
Assert(list != nullptr);
|
||||
|
||||
@ -76,8 +78,6 @@ s64 thread_group_run (Thread* thread) {
|
||||
entry->thread_index = thread->index;
|
||||
entry->next = nullptr;
|
||||
|
||||
// #TODO(Log)
|
||||
|
||||
Thread_Continue_Status should_continue = Thread_Continue_Status::THREAD_CONTINUE;
|
||||
if (group->proc) {
|
||||
should_continue = group->proc(group, thread, entry->work);
|
||||
@ -103,8 +103,7 @@ s64 thread_group_run (Thread* thread) {
|
||||
for (s64 i = 0; i < info->work_steal_indices.count; i += 1) {
|
||||
entry = get_work(&group->worker_info[i].available);
|
||||
if (entry) {
|
||||
// #TODO(Log)
|
||||
break; // for
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
@ -227,7 +226,7 @@ bool shutdown (Thread_Group* group, s32 timeout_milliseconds = -1) {
|
||||
|
||||
// Should have a shutdown_and_reset option too (see how I did it in prototyping-main)
|
||||
|
||||
void add_work (Thread_Group* group, void* work) { // string logging_name
|
||||
void add_work (Thread_Group* group, void* work) {
|
||||
Assert(group->worker_info.count > 0);
|
||||
|
||||
push_allocator(group->allocator);
|
||||
@ -235,7 +234,6 @@ void add_work (Thread_Group* group, void* work) { // string logging_name
|
||||
// Make a work entry, a linked list node that lets us queue and unqueue
|
||||
Work_Entry* entry = New<Work_Entry>();
|
||||
entry->work = work;
|
||||
// entry->logging_name = "";
|
||||
entry->issue_time = GetUnixTimestamp();
|
||||
|
||||
// Choose which thread will run this work.
|
||||
@ -251,8 +249,6 @@ void add_work (Thread_Group* group, void* work) { // string logging_name
|
||||
// Add this node to the linked list of available work for that thread:
|
||||
Work_List* list = &group->worker_info[thread_index].available;
|
||||
add_work(list, entry);
|
||||
|
||||
// #TODO: Log if necessary.
|
||||
}
|
||||
|
||||
ArrayView<void*> get_completed_work (Thread_Group* group) {
|
||||
@ -290,14 +286,13 @@ ArrayView<void*> get_completed_work (Thread_Group* group) {
|
||||
|
||||
if (!completed) continue;
|
||||
|
||||
// Reserve the output array. Probably doesn't help much. Note that
|
||||
// we are maybe adding small numbers of results over a larger number
|
||||
// of cores. Really if we want to be efficient here, we can build
|
||||
// #TODO: #Thread_Group #array_reserve - try to do this in two passes:
|
||||
// Note that we are maybe adding small numbers of results over a larger
|
||||
// number of cores. Really, if we want to be efficient here, we can build
|
||||
// a larger linked list out of the mini-lists we gather, and accumulate
|
||||
// the counts, then do the reserve all in one batch when we are done
|
||||
// looking at the threads. For simplicity this has not yet been done,
|
||||
// but it may not be much more complicated, actually.
|
||||
// #TODO(Musa) - do this^
|
||||
array_reserve(results, results.count + new_count);
|
||||
s64 old_count = results.count;
|
||||
|
||||
@ -305,8 +300,6 @@ ArrayView<void*> get_completed_work (Thread_Group* group) {
|
||||
array_add(results, completed->work);
|
||||
Work_Entry* next = completed->next;
|
||||
|
||||
// #TODO(Log)
|
||||
|
||||
internal_free(completed);
|
||||
completed = next;
|
||||
}
|
||||
@ -314,6 +307,6 @@ ArrayView<void*> get_completed_work (Thread_Group* group) {
|
||||
Assert(results.count == old_count + new_count);
|
||||
}
|
||||
|
||||
return {};
|
||||
return ArrayView<void*>(results);
|
||||
}
|
||||
|
||||
|
||||
@ -35,7 +35,6 @@
|
||||
};
|
||||
#endif
|
||||
|
||||
struct Thread;
|
||||
// really hacky forward declares.
|
||||
struct Work_Entry;
|
||||
struct Worker_Info;
|
||||
@ -108,5 +107,4 @@ struct Thread_Group {
|
||||
bool initialized = false;
|
||||
bool started = false;
|
||||
bool should_exit = false;
|
||||
// bool enable_logging;
|
||||
};
|
||||
|
||||
@ -1,51 +0,0 @@
|
||||
internal void Bootstrap_Main_Thread_Context () {
|
||||
// 0. Setup general purpose allocator
|
||||
GPAllocator_Initialize_Allocation_Tracker();
|
||||
// 1. Setup arena table
|
||||
initialize_arena_table();
|
||||
// 2. Setup thread local context
|
||||
Arena* arena = next_arena(Arena_Reserve::Size_64G);
|
||||
thread_local_context = New<Thread_Context>(get_allocator(arena));
|
||||
thread_local_context->temp = next_arena(Arena_Reserve::Size_64G);
|
||||
thread_local_context->arena = arena;
|
||||
thread_local_context->allocator = get_allocator(arena);
|
||||
thread_local_context->thread_idx = 0;
|
||||
thread_local_context->thread_name = "Main Thread";
|
||||
// thread_local_context->logger = init_logger();
|
||||
|
||||
}
|
||||
|
||||
// #include "lib/Base/Arena_Array.h"
|
||||
void run_arena_array_tests () {
|
||||
{ push_arena(thread_local_context->temp);
|
||||
push_alignment(thread_local_context->temp, 1);
|
||||
auto_reset(thread_local_context->temp);
|
||||
auto something = New<Thread_Context>();
|
||||
auto something2 = New<Array<s64>>();
|
||||
auto something3 = internal_alloc(5000);
|
||||
}
|
||||
|
||||
{ push_allocator(GPAllocator());
|
||||
auto something = New<Thread_Context>();
|
||||
auto something2 = New<Array<s64>>();
|
||||
}
|
||||
|
||||
// { auto na = arena_array_new<s64>(64000, Arena_Reserve::Size_64G);
|
||||
// array_add(...)
|
||||
// }
|
||||
}
|
||||
|
||||
internal void Main_Entry_Point (int argc, WCHAR **argv) {
|
||||
run_arena_array_tests();
|
||||
|
||||
Worker_Info* info = (Worker_Info*)GPAllocator_New(sizeof(Worker_Info), 64);
|
||||
|
||||
debug_break();
|
||||
printf("sizeof(Worker_Info): %zd\n", sizeof(Thread));
|
||||
printf("sizeof(Worker_Info): %zd\n", sizeof(Worker_Info));
|
||||
|
||||
// #TODO:
|
||||
// [ ] Launch second thread
|
||||
// [ ] Setup Mouse and Keyboard Inputs
|
||||
// OS_Create_Window();
|
||||
}
|
||||
@ -1,12 +1,17 @@
|
||||
// #TODO: #OS_Win32
|
||||
// [ ] #Thread cleanup: in `thread_deinit` is there any requirement to cleanup child threads?
|
||||
// [ ] #Exception handling code in `Win32_Exception_Filter`
|
||||
// [ ] #cpuid - enumerate CPUs and Thread count (current implementation doesn't work)
|
||||
|
||||
#if OS_WINDOWS
|
||||
constexpr s64 FILETIME_TO_UNIX = 116444736000000000i64;
|
||||
f64 GetUnixTimestamp() {
|
||||
f64 GetUnixTimestamp () {
|
||||
FILETIME fileTime;
|
||||
GetSystemTimePreciseAsFileTime(&fileTime);
|
||||
s64 ticks = ((s64)fileTime.dwHighDateTime << (s64)32) | (s64)fileTime.dwLowDateTime;
|
||||
return (ticks - FILETIME_TO_UNIX) / (10.0 * 1000.0 * 1000.0);
|
||||
}
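// Unit check for the conversion above: FILETIME counts 100-nanosecond ticks since 1601-01-01, and
// FILETIME_TO_UNIX is the tick count between 1601-01-01 and 1970-01-01, so subtracting it rebases the
// clock to the Unix epoch and dividing by 10,000,000 (ticks per second) yields seconds as an f64.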
|
||||
s64 GetUnixTimestampNanoseconds() {
|
||||
s64 GetUnixTimestampNanoseconds () {
|
||||
FILETIME fileTime;
|
||||
GetSystemTimePreciseAsFileTime(&fileTime);
|
||||
|
||||
@ -54,7 +59,7 @@ internal b32 win32_g_is_quiet = 0; // No console output
|
||||
internal LONG WINAPI Win32_Exception_Filter (EXCEPTION_POINTERS* exception_ptrs) {
|
||||
if (win32_g_is_quiet) { ExitProcess(1); }
|
||||
|
||||
static volatile LONG first = 0;
|
||||
local_persist volatile LONG first = 0;
|
||||
if(InterlockedCompareExchange(&first, 1, 0) != 0)
|
||||
{ // prevent failures in other threads to popup same message box
|
||||
// this handler just shows first thread that crashes
|
||||
@ -62,12 +67,11 @@ internal LONG WINAPI Win32_Exception_Filter (EXCEPTION_POINTERS* exception_ptrs)
|
||||
for (;;) Sleep(1000);
|
||||
}
|
||||
|
||||
// #TODO: Exception handling code.
|
||||
// #Exception handling code (TODO)
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
internal void Bootstrap_Main_Thread_Context ();
|
||||
// internal void Main_Entry_Point (int argc, WCHAR **argv);
|
||||
internal void Win32_Entry_Point (int argc, WCHAR **argv) {
|
||||
// See: w32_entry_point_caller(); (raddebugger)
|
||||
@ -95,11 +99,8 @@ internal void Win32_Entry_Point (int argc, WCHAR **argv) {
|
||||
// }
|
||||
// }
|
||||
|
||||
Bootstrap_Main_Thread_Context();
|
||||
|
||||
push_arena(get_thread_context()->arena);
|
||||
|
||||
// #TODO: Need to write Win32 abstraction layer first.
|
||||
{ OS_System_Info* info = &os_state_w32.system_info;
|
||||
info->logical_processor_count = (s32)sysinfo.dwNumberOfProcessors;
|
||||
info->page_size = sysinfo.dwPageSize;
|
||||
@ -110,6 +111,7 @@ internal void Win32_Entry_Point (int argc, WCHAR **argv) {
|
||||
info->large_pages_allowed = false;
|
||||
info->process_id = GetCurrentProcessId();
|
||||
}
|
||||
// #cpuid
|
||||
/*{ OS_System_Info* info = &os_state_w32.system_info;
|
||||
// [ ] Extract input args
|
||||
u32 length;
|
||||
@ -149,7 +151,7 @@ internal void Win32_Entry_Point (int argc, WCHAR **argv) {
|
||||
info->physical_core_count = (s32)all_cpus_count;
|
||||
info->primary_core_count = (s32)performance_core_count;
|
||||
}
|
||||
// info->secondary_core_count = #TODO;
|
||||
// info->secondary_core_count = ;
|
||||
*/
|
||||
{ OS_System_Info* info = &os_state_w32.system_info;
|
||||
u8 buffer[MAX_COMPUTERNAME_LENGTH + 1] = {0};
|
||||
@ -173,8 +175,6 @@ internal void Win32_Entry_Point (int argc, WCHAR **argv) {
|
||||
// [ ] GetEnvironmentStringsW
|
||||
// temp_reset();
|
||||
printf("Hello there!\n\n");
|
||||
// See: main_thread_base_entry_point
|
||||
Main_Entry_Point(argc, argv);
|
||||
}
|
||||
|
||||
C_LINKAGE DWORD OS_Windows_Thread_Entry_Point (void* parameter) {
|
||||
@ -202,16 +202,14 @@ internal bool thread_init (Thread* thread, Thread_Proc proc, string thread_name=
|
||||
|
||||
s64 this_thread_index = InterlockedIncrement(&next_thread_index);
|
||||
|
||||
// We may not always want such a bulky thread startup. The
|
||||
// size of the starting arena and temp should be parameterized (+2 bytes)
|
||||
// make thread_init_ex with params...
|
||||
Arena* arena = next_arena(Arena_Reserve::Size_64G);
|
||||
push_arena(arena);
|
||||
thread->context = New<Thread_Context>(get_allocator(arena));
|
||||
thread->context->temp = next_arena(Arena_Reserve::Size_64G);
|
||||
thread->context->arena = arena;
|
||||
thread->context->allocator = get_allocator(arena);
|
||||
ExpandableArena* arena_ex = expandable_arena_new(Arena_Reserve::Size_64M, 16);
|
||||
|
||||
thread->context = New<Thread_Context>(get_allocator(arena_ex));
|
||||
thread->context->temp = expandable_arena_new(Arena_Reserve::Size_2M, 16);
|
||||
thread->context->arena = arena_ex;
|
||||
thread->context->allocator = get_allocator(arena_ex);
|
||||
thread->context->thread_idx = (s32)this_thread_index;
|
||||
push_arena(arena_ex);
|
||||
thread->context->thread_name = copy_string(thread_name);
|
||||
|
||||
thread->os_thread.windows_thread = windows_thread;
|
||||
@ -229,9 +227,8 @@ internal void thread_deinit (Thread* thread) {
|
||||
}
|
||||
thread->os_thread.windows_thread = nullptr;
|
||||
|
||||
// #TODO: Thread cleanup:
|
||||
release_arena(thread->context->temp, true);
|
||||
release_arena(thread->context->arena, true);
|
||||
arena_delete(thread->context->temp);
|
||||
arena_delete(thread->context->arena);
|
||||
}
|
||||
|
||||
internal void thread_start (Thread* thread) {
|
||||
@ -258,7 +255,7 @@ internal void lock (Mutex* mutex) {
|
||||
internal void unlock (Mutex* mutex) {
|
||||
LeaveCriticalSection(&mutex->csection);
|
||||
}
|
||||
internal void semaphore_init (Semaphore* sem, s32 initial_value = 0) {
|
||||
internal void semaphore_init (Semaphore* sem, s32 initial_value) {
|
||||
Assert(initial_value >= 0);
|
||||
sem->event = CreateSemaphoreW(nullptr, initial_value, 0x7fffffff, nullptr);
|
||||
}
|
||||
@ -269,13 +266,7 @@ internal void signal (Semaphore* sem) {
|
||||
ReleaseSemaphore(sem->event, 1, nullptr);
|
||||
}
|
||||
|
||||
enum class Wait_For_Result : s32 {
|
||||
SUCCESS = 0,
|
||||
TIMEOUT = 1,
|
||||
ERROR = 2 // can't use ERROR because of Windows.h *sigh*
|
||||
};
|
||||
|
||||
internal Wait_For_Result wait_for (Semaphore* sem, s32 milliseconds = -1) {
|
||||
internal Wait_For_Result wait_for (Semaphore* sem, s32 milliseconds) {
|
||||
DWORD res = 0;
|
||||
if (milliseconds < 0) {
|
||||
res = WaitForSingleObject(sem->event, INFINITE);
|
||||
@ -297,7 +288,7 @@ internal void condition_variable_init (Condition_Variable* cv) {
|
||||
internal void condition_variable_destroy (Condition_Variable* cv) {
|
||||
// No action required.
|
||||
}
|
||||
internal void wait (Condition_Variable* cv, Mutex* mutex, s32 wait_time_ms = -1) {
|
||||
internal void wait (Condition_Variable* cv, Mutex* mutex, s32 wait_time_ms) {
|
||||
SleepConditionVariableCS(&cv->condition_variable, &mutex->csection, (DWORD)wait_time_ms);
|
||||
}
|
||||
internal void wake (Condition_Variable* cv) {
|
||||
@ -306,3 +297,18 @@ internal void wake (Condition_Variable* cv) {
|
||||
internal void wake_all (Condition_Variable* cv) {
|
||||
WakeAllConditionVariable(&cv->condition_variable);
|
||||
}
|
||||
|
||||
// #window_creation
|
||||
Window_Type create_window (string new_window_name) {
|
||||
return 0;
|
||||
}
|
||||
|
||||
// #TODO: #window_creation
|
||||
// [ ] resize_window
|
||||
// [ ] position_window
|
||||
// [ ] toggle_fullscreen
|
||||
// [ ] get_dimensions
|
||||
|
||||
// #TODO: #window_interaction (mouse/keyboard)
|
||||
// [ ] get_mouse_pointer_position
|
||||
// [ ] ... What APIs do I need for Keyboard
|
||||
lib/OS/OS_Win32.h
@ -0,0 +1,32 @@
f64 GetUnixTimestamp ();
s64 GetUnixTimestampNanoseconds ();

struct Condition_Variable;
struct Semaphore;
struct Mutex;
struct OS_Thread;

enum class Wait_For_Result : s32 {
    SUCCESS = 0,
    TIMEOUT = 1,
    ERROR = 2 // can't use ERROR because of Windows.h *sigh*
};

internal void mutex_init    (Mutex* mutex);
internal void mutex_destroy (Mutex* mutex);
internal void lock          (Mutex* mutex);
internal void unlock        (Mutex* mutex);

internal void semaphore_init    (Semaphore* sem, s32 initial_value = 0);
internal void semaphore_destroy (Semaphore* sem);
internal void signal            (Semaphore* sem);
internal Wait_For_Result wait_for (Semaphore* sem, s32 milliseconds = -1);

internal void condition_variable_init    (Condition_Variable* cv);
internal void condition_variable_destroy (Condition_Variable* cv);
internal void wait     (Condition_Variable* cv, Mutex* mutex, s32 wait_time_ms = -1);
internal void wake     (Condition_Variable* cv);
internal void wake_all (Condition_Variable* cv);

// #window_creation
typedef HWND Window_Type;
lib/third_party/dear-imgui/imconfig.h (vendored)
@ -0,0 +1,147 @@
|
||||
//-----------------------------------------------------------------------------
|
||||
// DEAR IMGUI COMPILE-TIME OPTIONS
|
||||
// Runtime options (clipboard callbacks, enabling various features, etc.) can generally be set via the ImGuiIO structure.
|
||||
// You can use ImGui::SetAllocatorFunctions() before calling ImGui::CreateContext() to rewire memory allocation functions.
|
||||
//-----------------------------------------------------------------------------
|
||||
// A) You may edit imconfig.h (and not overwrite it when updating Dear ImGui, or maintain a patch/rebased branch with your modifications to it)
|
||||
// B) or '#define IMGUI_USER_CONFIG "my_imgui_config.h"' in your project and then add directives in your own file without touching this template.
|
||||
//-----------------------------------------------------------------------------
|
||||
// You need to make sure that configuration settings are defined consistently _everywhere_ Dear ImGui is used, which include the imgui*.cpp
|
||||
// files but also _any_ of your code that uses Dear ImGui. This is because some compile-time options have an effect on data structures.
|
||||
// Defining those options in imconfig.h will ensure every compilation unit gets to see the same data structure layouts.
|
||||
// Call IMGUI_CHECKVERSION() from your .cpp file to verify that the data structures your files are using are matching the ones imgui.cpp is using.
|
||||
//-----------------------------------------------------------------------------
|
||||
|
||||
#pragma once
|
||||
|
||||
//---- Define assertion handler. Defaults to calling assert().
|
||||
// - If your macro uses multiple statements, make sure it is enclosed in a 'do { .. } while (0)' block so it can be used as a single statement.
|
||||
// - Compiling with NDEBUG will usually strip out assert() to nothing, which is NOT recommended because we use asserts to notify of programmer mistakes.
|
||||
//#define IM_ASSERT(_EXPR) MyAssert(_EXPR)
|
||||
//#define IM_ASSERT(_EXPR) ((void)(_EXPR)) // Disable asserts
|
||||
|
||||
//---- Define attributes of all API symbols declarations, e.g. for DLL under Windows
|
||||
// Using Dear ImGui via a shared library is not recommended, because of function call overhead and because we don't guarantee backward nor forward ABI compatibility.
|
||||
// - Windows DLL users: heaps and globals are not shared across DLL boundaries! You will need to call SetCurrentContext() + SetAllocatorFunctions()
|
||||
// for each static/DLL boundary you are calling from. Read "Context and Memory Allocators" section of imgui.cpp for more details.
|
||||
//#define IMGUI_API __declspec(dllexport) // MSVC Windows: DLL export
|
||||
//#define IMGUI_API __declspec(dllimport) // MSVC Windows: DLL import
|
||||
//#define IMGUI_API __attribute__((visibility("default"))) // GCC/Clang: override visibility when set is hidden
|
||||
|
||||
//---- Don't define obsolete functions/enums/behaviors. Consider enabling from time to time after updating to clean your code of obsolete function/names.
|
||||
//#define IMGUI_DISABLE_OBSOLETE_FUNCTIONS
|
||||
|
||||
//---- Disable all of Dear ImGui or don't implement standard windows/tools.
|
||||
// It is very strongly recommended to NOT disable the demo windows and debug tool during development. They are extremely useful in day to day work. Please read comments in imgui_demo.cpp.
|
||||
//#define IMGUI_DISABLE // Disable everything: all headers and source files will be empty.
|
||||
//#define IMGUI_DISABLE_DEMO_WINDOWS // Disable demo windows: ShowDemoWindow()/ShowStyleEditor() will be empty.
|
||||
//#define IMGUI_DISABLE_DEBUG_TOOLS // Disable metrics/debugger and other debug tools: ShowMetricsWindow(), ShowDebugLogWindow() and ShowIDStackToolWindow() will be empty.
|
||||
|
||||
//---- Don't implement some functions to reduce linkage requirements.
|
||||
//#define IMGUI_DISABLE_WIN32_DEFAULT_CLIPBOARD_FUNCTIONS // [Win32] Don't implement default clipboard handler. Won't use and link with OpenClipboard/GetClipboardData/CloseClipboard etc. (user32.lib/.a, kernel32.lib/.a)
|
||||
//#define IMGUI_ENABLE_WIN32_DEFAULT_IME_FUNCTIONS // [Win32] [Default with Visual Studio] Implement default IME handler (require imm32.lib/.a, auto-link for Visual Studio, -limm32 on command-line for MinGW)
|
||||
//#define IMGUI_DISABLE_WIN32_DEFAULT_IME_FUNCTIONS // [Win32] [Default with non-Visual Studio compilers] Don't implement default IME handler (won't require imm32.lib/.a)
|
||||
//#define IMGUI_DISABLE_WIN32_FUNCTIONS // [Win32] Won't use and link with any Win32 function (clipboard, IME).
|
||||
//#define IMGUI_ENABLE_OSX_DEFAULT_CLIPBOARD_FUNCTIONS // [OSX] Implement default OSX clipboard handler (need to link with '-framework ApplicationServices', this is why this is not the default).
|
||||
//#define IMGUI_DISABLE_DEFAULT_SHELL_FUNCTIONS // Don't implement default platform_io.Platform_OpenInShellFn() handler (Win32: ShellExecute(), require shell32.lib/.a, Mac/Linux: use system("")).
|
||||
//#define IMGUI_DISABLE_DEFAULT_FORMAT_FUNCTIONS // Don't implement ImFormatString/ImFormatStringV so you can implement them yourself (e.g. if you don't want to link with vsnprintf)
|
||||
//#define IMGUI_DISABLE_DEFAULT_MATH_FUNCTIONS // Don't implement ImFabs/ImSqrt/ImPow/ImFmod/ImCos/ImSin/ImAcos/ImAtan2 so you can implement them yourself.
|
||||
//#define IMGUI_DISABLE_FILE_FUNCTIONS // Don't implement ImFileOpen/ImFileClose/ImFileRead/ImFileWrite and ImFileHandle at all (replace them with dummies)
|
||||
//#define IMGUI_DISABLE_DEFAULT_FILE_FUNCTIONS // Don't implement ImFileOpen/ImFileClose/ImFileRead/ImFileWrite and ImFileHandle so you can implement them yourself if you don't want to link with fopen/fclose/fread/fwrite. This will also disable the LogToTTY() function.
|
||||
//#define IMGUI_DISABLE_DEFAULT_ALLOCATORS // Don't implement default allocators calling malloc()/free() to avoid linking with them. You will need to call ImGui::SetAllocatorFunctions().
|
||||
//#define IMGUI_DISABLE_DEFAULT_FONT // Disable default embedded font (ProggyClean.ttf), remove ~9.5 KB from output binary. AddFontDefault() will assert.
|
||||
//#define IMGUI_DISABLE_SSE // Disable use of SSE intrinsics even if available
|
||||
|
||||
//---- Enable Test Engine / Automation features.
|
||||
//#define IMGUI_ENABLE_TEST_ENGINE // Enable imgui_test_engine hooks. Generally set automatically by include "imgui_te_config.h", see Test Engine for details.
|
||||
|
||||
//---- Include imgui_user.h at the end of imgui.h as a convenience
|
||||
// May be convenient for some users to only explicitly include vanilla imgui.h and have extra stuff included.
|
||||
//#define IMGUI_INCLUDE_IMGUI_USER_H
|
||||
//#define IMGUI_USER_H_FILENAME "my_folder/my_imgui_user.h"
|
||||
|
||||
//---- Pack vertex colors as BGRA8 instead of RGBA8 (to avoid converting from one to another). Need dedicated backend support.
|
||||
//#define IMGUI_USE_BGRA_PACKED_COLOR
|
||||
|
||||
//---- Use legacy CRC32-adler tables (used before 1.91.6), in order to preserve old .ini data that you cannot afford to invalidate.
|
||||
//#define IMGUI_USE_LEGACY_CRC32_ADLER
|
||||
|
||||
//---- Use 32-bit for ImWchar (default is 16-bit) to support Unicode planes 1-16. (e.g. point beyond 0xFFFF like emoticons, dingbats, symbols, shapes, ancient languages, etc...)
|
||||
//#define IMGUI_USE_WCHAR32
|
||||
|
||||
//---- Avoid multiple STB libraries implementations, or redefine path/filenames to prioritize another version
|
||||
// By default the embedded implementations are declared static and not available outside of Dear ImGui sources files.
|
||||
//#define IMGUI_STB_TRUETYPE_FILENAME "my_folder/stb_truetype.h"
|
||||
//#define IMGUI_STB_RECT_PACK_FILENAME "my_folder/stb_rect_pack.h"
|
||||
//#define IMGUI_STB_SPRINTF_FILENAME "my_folder/stb_sprintf.h" // only used if IMGUI_USE_STB_SPRINTF is defined.
|
||||
//#define IMGUI_DISABLE_STB_TRUETYPE_IMPLEMENTATION
|
||||
//#define IMGUI_DISABLE_STB_RECT_PACK_IMPLEMENTATION
|
||||
//#define IMGUI_DISABLE_STB_SPRINTF_IMPLEMENTATION // only disabled if IMGUI_USE_STB_SPRINTF is defined.
|
||||
|
||||
//---- Use stb_sprintf.h for a faster implementation of vsnprintf instead of the one from libc (unless IMGUI_DISABLE_DEFAULT_FORMAT_FUNCTIONS is defined)
|
||||
// Compatibility checks of arguments and formats done by clang and GCC will be disabled in order to support the extra formats provided by stb_sprintf.h.
|
||||
//#define IMGUI_USE_STB_SPRINTF
|
||||
|
||||
//---- Use FreeType to build and rasterize the font atlas (instead of stb_truetype which is embedded by default in Dear ImGui)
|
||||
// Requires FreeType headers to be available in the include path. Requires program to be compiled with 'misc/freetype/imgui_freetype.cpp' (in this repository) + the FreeType library (not provided).
|
||||
// Note that imgui_freetype.cpp may be used _without_ this define, if you manually call ImFontAtlas::SetFontLoader(). The define is simply a convenience.
|
||||
// On Windows you may use vcpkg with 'vcpkg install freetype --triplet=x64-windows' + 'vcpkg integrate install'.
|
||||
//#define IMGUI_ENABLE_FREETYPE
|
||||
|
||||
//---- Use FreeType + plutosvg or lunasvg to render OpenType SVG fonts (SVGinOT)
|
||||
// Only works in combination with IMGUI_ENABLE_FREETYPE.
|
||||
// - plutosvg is currently easier to install, as e.g. it is part of vcpkg. It will support more fonts and may load them faster. See misc/freetype/README for instructions.
|
||||
// - Both require headers to be available in the include path + program to be linked with the library code (not provided).
|
||||
// - (note: lunasvg implementation is based on Freetype's rsvg-port.c which is licensed under CeCILL-C Free Software License Agreement)
|
||||
//#define IMGUI_ENABLE_FREETYPE_PLUTOSVG
|
||||
//#define IMGUI_ENABLE_FREETYPE_LUNASVG
|
||||
|
||||
//---- Use stb_truetype to build and rasterize the font atlas (default)
|
||||
// The only purpose of this define is if you want to force compilation of the stb_truetype backend ALONG with the FreeType backend.
|
||||
//#define IMGUI_ENABLE_STB_TRUETYPE
|
||||
|
||||
//---- Define constructor and implicit cast operators to convert back<>forth between your math types and ImVec2/ImVec4.
|
||||
// This will be inlined as part of ImVec2 and ImVec4 class declarations.
|
||||
/*
|
||||
#define IM_VEC2_CLASS_EXTRA \
|
||||
constexpr ImVec2(const MyVec2& f) : x(f.x), y(f.y) {} \
|
||||
operator MyVec2() const { return MyVec2(x,y); }
|
||||
|
||||
#define IM_VEC4_CLASS_EXTRA \
|
||||
constexpr ImVec4(const MyVec4& f) : x(f.x), y(f.y), z(f.z), w(f.w) {} \
|
||||
operator MyVec4() const { return MyVec4(x,y,z,w); }
|
||||
*/
|
||||
//---- ...Or use Dear ImGui's own very basic math operators.
|
||||
//#define IMGUI_DEFINE_MATH_OPERATORS
|
||||
|
||||
//---- Using 32-bit vertex indices (default is 16-bit) is one way to allow large meshes with more than 64K vertices.
|
||||
// Your renderer backend will need to support it (most example renderer backends support both 16/32-bit indices).
|
||||
// Another way to allow large meshes while keeping 16-bit indices is to handle ImDrawCmd::VtxOffset in your renderer.
|
||||
// Read about ImGuiBackendFlags_RendererHasVtxOffset for details.
|
||||
//#define ImDrawIdx unsigned int
|
||||
|
||||
//---- Override ImDrawCallback signature (will need to modify renderer backends accordingly)
|
||||
//struct ImDrawList;
|
||||
//struct ImDrawCmd;
|
||||
//typedef void (*MyImDrawCallback)(const ImDrawList* draw_list, const ImDrawCmd* cmd, void* my_renderer_user_data);
|
||||
//#define ImDrawCallback MyImDrawCallback
|
||||
|
||||
//---- Debug Tools: Macro to break in Debugger (we provide a default implementation of this in the codebase)
|
||||
// (use 'Metrics->Tools->Item Picker' to pick widgets with the mouse and break into them for easy debugging.)
|
||||
//#define IM_DEBUG_BREAK IM_ASSERT(0)
|
||||
//#define IM_DEBUG_BREAK __debugbreak()
|
||||
|
||||
//---- Debug Tools: Enable highlight ID conflicts _before_ hovering items. When io.ConfigDebugHighlightIdConflicts is set.
|
||||
// (THIS WILL SLOW DOWN DEAR IMGUI. Only use occasionally and disable after use)
|
||||
//#define IMGUI_DEBUG_HIGHLIGHT_ALL_ID_CONFLICTS
|
||||
|
||||
//---- Debug Tools: Enable slower asserts
|
||||
//#define IMGUI_DEBUG_PARANOID
|
||||
|
||||
//---- Tip: You can add extra functions within the ImGui:: namespace from anywhere (e.g. your own sources/header files)
|
||||
/*
|
||||
namespace ImGui
|
||||
{
|
||||
void MyFunction(const char* name, MyMatrix44* mtx);
|
||||
}
|
||||
*/
|
||||
24007
lib/third_party/dear-imgui/imgui.cpp
vendored
Normal file
File diff suppressed because it is too large
4445
lib/third_party/dear-imgui/imgui.h
vendored
Normal file
File diff suppressed because it is too large
11196
lib/third_party/dear-imgui/imgui_demo.cpp
vendored
Normal file
File diff suppressed because it is too large
6371
lib/third_party/dear-imgui/imgui_draw.cpp
vendored
Normal file
File diff suppressed because it is too large
837
lib/third_party/dear-imgui/imgui_impl_dx11.cpp
vendored
Normal file
@ -0,0 +1,837 @@
|
||||
// dear imgui: Renderer Backend for DirectX11
|
||||
// This needs to be used along with a Platform Backend (e.g. Win32)
|
||||
|
||||
// Implemented features:
|
||||
// [X] Renderer: User texture binding. Use 'ID3D11ShaderResourceView*' as texture identifier. Read the FAQ about ImTextureID/ImTextureRef!
|
||||
// [X] Renderer: Large meshes support (64k+ vertices) even with 16-bit indices (ImGuiBackendFlags_RendererHasVtxOffset).
|
||||
// [X] Renderer: Texture updates support for dynamic font atlas (ImGuiBackendFlags_RendererHasTextures).
|
||||
// [X] Renderer: Expose selected render state for draw callbacks to use. Access in '(ImGui_ImplXXXX_RenderState*)GetPlatformIO().Renderer_RenderState'.
|
||||
// [X] Renderer: Multi-viewport support (multiple windows). Enable with 'io.ConfigFlags |= ImGuiConfigFlags_ViewportsEnable'.
|
||||
|
||||
// You can use unmodified imgui_impl_* files in your project. See examples/ folder for examples of using this.
|
||||
// Prefer including the entire imgui/ repository into your project (either as a copy or as a submodule), and only build the backends you need.
|
||||
// Learn about Dear ImGui:
|
||||
// - FAQ https://dearimgui.com/faq
|
||||
// - Getting Started https://dearimgui.com/getting-started
|
||||
// - Documentation https://dearimgui.com/docs (same as your local docs/ folder).
|
||||
// - Introduction, links and more at the top of imgui.cpp
|
||||
|
||||
// CHANGELOG
|
||||
// (minor and older changes stripped away, please see git history for details)
|
||||
// 2025-XX-XX: Platform: Added support for multiple windows via the ImGuiPlatformIO interface.
|
||||
// 2025-09-18: Call platform_io.ClearRendererHandlers() on shutdown.
|
||||
// 2025-06-11: DirectX11: Added support for ImGuiBackendFlags_RendererHasTextures, for dynamic font atlas.
|
||||
// 2025-05-07: DirectX11: Honor draw_data->FramebufferScale to allow for custom backends and experiment using it (consistently with other renderer backends, even though in normal condition it is not set under Windows).
|
||||
// 2025-02-24: [Docking] Added undocumented ImGui_ImplDX11_SetSwapChainDescs() to configure swap chain creation for secondary viewports.
|
||||
// 2025-01-06: DirectX11: Expose VertexConstantBuffer in ImGui_ImplDX11_RenderState. Reset projection matrix in ImDrawCallback_ResetRenderState handler.
|
||||
// 2024-10-07: DirectX11: Changed default texture sampler to Clamp instead of Repeat/Wrap.
|
||||
// 2024-10-07: DirectX11: Expose selected render state in ImGui_ImplDX11_RenderState, which you can access in 'void* platform_io.Renderer_RenderState' during draw callbacks.
|
||||
// 2022-10-11: Using 'nullptr' instead of 'NULL' as per our switch to C++11.
|
||||
// 2021-06-29: Reorganized backend to pull data from a single structure to facilitate usage with multiple-contexts (all g_XXXX access changed to bd->XXXX).
|
||||
// 2021-05-19: DirectX11: Replaced direct access to ImDrawCmd::TextureId with a call to ImDrawCmd::GetTexID(). (will become a requirement)
|
||||
// 2021-02-18: DirectX11: Change blending equation to preserve alpha in output buffer.
|
||||
// 2019-08-01: DirectX11: Fixed code querying the Geometry Shader state (would generally error with Debug layer enabled).
|
||||
// 2019-07-21: DirectX11: Backup, clear and restore Geometry Shader if any is bound when calling ImGui_ImplDX11_RenderDrawData. Clearing Hull/Domain/Compute shaders without backup/restore.
|
||||
// 2019-05-29: DirectX11: Added support for large mesh (64K+ vertices), enable ImGuiBackendFlags_RendererHasVtxOffset flag.
|
||||
// 2019-04-30: DirectX11: Added support for special ImDrawCallback_ResetRenderState callback to reset render state.
|
||||
// 2018-12-03: Misc: Added #pragma comment statement to automatically link with d3dcompiler.lib when using D3DCompile().
|
||||
// 2018-11-30: Misc: Setting up io.BackendRendererName so it can be displayed in the About Window.
|
||||
// 2018-08-01: DirectX11: Querying for IDXGIFactory instead of IDXGIFactory1 to increase compatibility.
|
||||
// 2018-07-13: DirectX11: Fixed unreleased resources in Init and Shutdown functions.
|
||||
// 2018-06-08: Misc: Extracted imgui_impl_dx11.cpp/.h away from the old combined DX11+Win32 example.
|
||||
// 2018-06-08: DirectX11: Use draw_data->DisplayPos and draw_data->DisplaySize to setup projection matrix and clipping rectangle.
|
||||
// 2018-02-16: Misc: Obsoleted the io.RenderDrawListsFn callback and exposed ImGui_ImplDX11_RenderDrawData() in the .h file so you can call it yourself.
|
||||
// 2018-02-06: Misc: Removed call to ImGui::Shutdown() which is not available from 1.60 WIP, user needs to call CreateContext/DestroyContext themselves.
|
||||
// 2016-05-07: DirectX11: Disabling depth-write.
|
||||
|
||||
#include "imgui.h"
|
||||
#ifndef IMGUI_DISABLE
|
||||
#include "imgui_impl_dx11.h"
|
||||
|
||||
// DirectX
|
||||
#include <stdio.h>
|
||||
#include <d3d11.h>
|
||||
#include <d3dcompiler.h>
|
||||
#ifdef _MSC_VER
|
||||
#pragma comment(lib, "d3dcompiler") // Automatically link with d3dcompiler.lib as we are using D3DCompile() below.
|
||||
#endif
|
||||
|
||||
// Clang/GCC warnings with -Weverything
|
||||
#if defined(__clang__)
|
||||
#pragma clang diagnostic ignored "-Wold-style-cast" // warning: use of old-style cast // yes, they are more terse.
|
||||
#pragma clang diagnostic ignored "-Wsign-conversion" // warning: implicit conversion changes signedness
|
||||
#endif
|
||||
|
||||
// DirectX11 data
|
||||
struct ImGui_ImplDX11_Texture
|
||||
{
|
||||
ID3D11Texture2D* pTexture;
|
||||
ID3D11ShaderResourceView* pTextureView;
|
||||
};
|
||||
|
||||
struct ImGui_ImplDX11_Data
|
||||
{
|
||||
ID3D11Device* pd3dDevice;
|
||||
ID3D11DeviceContext* pd3dDeviceContext;
|
||||
IDXGIFactory* pFactory;
|
||||
ID3D11Buffer* pVB;
|
||||
ID3D11Buffer* pIB;
|
||||
ID3D11VertexShader* pVertexShader;
|
||||
ID3D11InputLayout* pInputLayout;
|
||||
ID3D11Buffer* pVertexConstantBuffer;
|
||||
ID3D11PixelShader* pPixelShader;
|
||||
ID3D11SamplerState* pTexSamplerLinear;
|
||||
ID3D11RasterizerState* pRasterizerState;
|
||||
ID3D11BlendState* pBlendState;
|
||||
ID3D11DepthStencilState* pDepthStencilState;
|
||||
int VertexBufferSize;
|
||||
int IndexBufferSize;
|
||||
ImVector<DXGI_SWAP_CHAIN_DESC> SwapChainDescsForViewports;
|
||||
|
||||
ImGui_ImplDX11_Data() { memset((void*)this, 0, sizeof(*this)); VertexBufferSize = 5000; IndexBufferSize = 10000; }
|
||||
};
|
||||
|
||||
struct VERTEX_CONSTANT_BUFFER_DX11
|
||||
{
|
||||
float mvp[4][4];
|
||||
};
|
||||
|
||||
// Backend data stored in io.BackendRendererUserData to allow support for multiple Dear ImGui contexts
|
||||
// It is STRONGLY preferred that you use docking branch with multi-viewports (== single Dear ImGui context + multiple windows) instead of multiple Dear ImGui contexts.
|
||||
static ImGui_ImplDX11_Data* ImGui_ImplDX11_GetBackendData()
|
||||
{
|
||||
return ImGui::GetCurrentContext() ? (ImGui_ImplDX11_Data*)ImGui::GetIO().BackendRendererUserData : nullptr;
|
||||
}
|
||||
|
||||
// Forward Declarations
|
||||
static void ImGui_ImplDX11_InitMultiViewportSupport();
|
||||
static void ImGui_ImplDX11_ShutdownMultiViewportSupport();
|
||||
|
||||
// Functions
|
||||
static void ImGui_ImplDX11_SetupRenderState(const ImDrawData* draw_data, ID3D11DeviceContext* device_ctx)
|
||||
{
|
||||
ImGui_ImplDX11_Data* bd = ImGui_ImplDX11_GetBackendData();
|
||||
|
||||
// Setup viewport
|
||||
D3D11_VIEWPORT vp = {};
|
||||
vp.Width = draw_data->DisplaySize.x * draw_data->FramebufferScale.x;
|
||||
vp.Height = draw_data->DisplaySize.y * draw_data->FramebufferScale.y;
|
||||
vp.MinDepth = 0.0f;
|
||||
vp.MaxDepth = 1.0f;
|
||||
vp.TopLeftX = vp.TopLeftY = 0;
|
||||
device_ctx->RSSetViewports(1, &vp);
|
||||
|
||||
// Setup orthographic projection matrix into our constant buffer
|
||||
// Our visible imgui space lies from draw_data->DisplayPos (top left) to draw_data->DisplayPos + draw_data->DisplaySize (bottom right). DisplayPos is (0,0) for single viewport apps.
|
||||
D3D11_MAPPED_SUBRESOURCE mapped_resource;
|
||||
if (device_ctx->Map(bd->pVertexConstantBuffer, 0, D3D11_MAP_WRITE_DISCARD, 0, &mapped_resource) == S_OK)
|
||||
{
|
||||
VERTEX_CONSTANT_BUFFER_DX11* constant_buffer = (VERTEX_CONSTANT_BUFFER_DX11*)mapped_resource.pData;
|
||||
float L = draw_data->DisplayPos.x;
|
||||
float R = draw_data->DisplayPos.x + draw_data->DisplaySize.x;
|
||||
float T = draw_data->DisplayPos.y;
|
||||
float B = draw_data->DisplayPos.y + draw_data->DisplaySize.y;
|
||||
float mvp[4][4] =
|
||||
{
|
||||
{ 2.0f/(R-L), 0.0f, 0.0f, 0.0f },
|
||||
{ 0.0f, 2.0f/(T-B), 0.0f, 0.0f },
|
||||
{ 0.0f, 0.0f, 0.5f, 0.0f },
|
||||
{ (R+L)/(L-R), (T+B)/(B-T), 0.5f, 1.0f },
|
||||
};
|
||||
memcpy(&constant_buffer->mvp, mvp, sizeof(mvp));
|
||||
device_ctx->Unmap(bd->pVertexConstantBuffer, 0);
|
||||
}
|
||||
|
||||
// Setup shader and vertex buffers
|
||||
unsigned int stride = sizeof(ImDrawVert);
|
||||
unsigned int offset = 0;
|
||||
device_ctx->IASetInputLayout(bd->pInputLayout);
|
||||
device_ctx->IASetVertexBuffers(0, 1, &bd->pVB, &stride, &offset);
|
||||
device_ctx->IASetIndexBuffer(bd->pIB, sizeof(ImDrawIdx) == 2 ? DXGI_FORMAT_R16_UINT : DXGI_FORMAT_R32_UINT, 0);
|
||||
device_ctx->IASetPrimitiveTopology(D3D11_PRIMITIVE_TOPOLOGY_TRIANGLELIST);
|
||||
device_ctx->VSSetShader(bd->pVertexShader, nullptr, 0);
|
||||
device_ctx->VSSetConstantBuffers(0, 1, &bd->pVertexConstantBuffer);
|
||||
device_ctx->PSSetShader(bd->pPixelShader, nullptr, 0);
|
||||
device_ctx->PSSetSamplers(0, 1, &bd->pTexSamplerLinear);
|
||||
device_ctx->GSSetShader(nullptr, nullptr, 0);
|
||||
device_ctx->HSSetShader(nullptr, nullptr, 0); // In theory we should backup and restore this as well.. very infrequently used..
|
||||
device_ctx->DSSetShader(nullptr, nullptr, 0); // In theory we should backup and restore this as well.. very infrequently used..
|
||||
device_ctx->CSSetShader(nullptr, nullptr, 0); // In theory we should backup and restore this as well.. very infrequently used..
|
||||
|
||||
// Setup render state
|
||||
const float blend_factor[4] = { 0.f, 0.f, 0.f, 0.f };
|
||||
device_ctx->OMSetBlendState(bd->pBlendState, blend_factor, 0xffffffff);
|
||||
device_ctx->OMSetDepthStencilState(bd->pDepthStencilState, 0);
|
||||
device_ctx->RSSetState(bd->pRasterizerState);
|
||||
}
|
||||
|
||||
// Render function
|
||||
void ImGui_ImplDX11_RenderDrawData(ImDrawData* draw_data)
|
||||
{
|
||||
// Avoid rendering when minimized
|
||||
if (draw_data->DisplaySize.x <= 0.0f || draw_data->DisplaySize.y <= 0.0f)
|
||||
return;
|
||||
|
||||
ImGui_ImplDX11_Data* bd = ImGui_ImplDX11_GetBackendData();
|
||||
ID3D11DeviceContext* device = bd->pd3dDeviceContext;
|
||||
|
||||
// Catch up with texture updates. Most of the time, the list will have 1 element with an OK status, aka nothing to do.
|
||||
// (This almost always points to ImGui::GetPlatformIO().Textures[] but is part of ImDrawData to allow overriding or disabling texture updates).
|
||||
if (draw_data->Textures != nullptr)
|
||||
for (ImTextureData* tex : *draw_data->Textures)
|
||||
if (tex->Status != ImTextureStatus_OK)
|
||||
ImGui_ImplDX11_UpdateTexture(tex);
|
||||
|
||||
// Create and grow vertex/index buffers if needed
|
||||
if (!bd->pVB || bd->VertexBufferSize < draw_data->TotalVtxCount)
|
||||
{
|
||||
if (bd->pVB) { bd->pVB->Release(); bd->pVB = nullptr; }
|
||||
bd->VertexBufferSize = draw_data->TotalVtxCount + 5000;
|
||||
D3D11_BUFFER_DESC desc = {};
|
||||
desc.Usage = D3D11_USAGE_DYNAMIC;
|
||||
desc.ByteWidth = bd->VertexBufferSize * sizeof(ImDrawVert);
|
||||
desc.BindFlags = D3D11_BIND_VERTEX_BUFFER;
|
||||
desc.CPUAccessFlags = D3D11_CPU_ACCESS_WRITE;
|
||||
desc.MiscFlags = 0;
|
||||
if (bd->pd3dDevice->CreateBuffer(&desc, nullptr, &bd->pVB) < 0)
|
||||
return;
|
||||
}
|
||||
if (!bd->pIB || bd->IndexBufferSize < draw_data->TotalIdxCount)
|
||||
{
|
||||
if (bd->pIB) { bd->pIB->Release(); bd->pIB = nullptr; }
|
||||
bd->IndexBufferSize = draw_data->TotalIdxCount + 10000;
|
||||
D3D11_BUFFER_DESC desc = {};
|
||||
desc.Usage = D3D11_USAGE_DYNAMIC;
|
||||
desc.ByteWidth = bd->IndexBufferSize * sizeof(ImDrawIdx);
|
||||
desc.BindFlags = D3D11_BIND_INDEX_BUFFER;
|
||||
desc.CPUAccessFlags = D3D11_CPU_ACCESS_WRITE;
|
||||
if (bd->pd3dDevice->CreateBuffer(&desc, nullptr, &bd->pIB) < 0)
|
||||
return;
|
||||
}
|
||||
|
||||
// Upload vertex/index data into a single contiguous GPU buffer
|
||||
D3D11_MAPPED_SUBRESOURCE vtx_resource, idx_resource;
|
||||
if (device->Map(bd->pVB, 0, D3D11_MAP_WRITE_DISCARD, 0, &vtx_resource) != S_OK)
|
||||
return;
|
||||
if (device->Map(bd->pIB, 0, D3D11_MAP_WRITE_DISCARD, 0, &idx_resource) != S_OK)
|
||||
return;
|
||||
ImDrawVert* vtx_dst = (ImDrawVert*)vtx_resource.pData;
|
||||
ImDrawIdx* idx_dst = (ImDrawIdx*)idx_resource.pData;
|
||||
for (const ImDrawList* draw_list : draw_data->CmdLists)
|
||||
{
|
||||
memcpy(vtx_dst, draw_list->VtxBuffer.Data, draw_list->VtxBuffer.Size * sizeof(ImDrawVert));
|
||||
memcpy(idx_dst, draw_list->IdxBuffer.Data, draw_list->IdxBuffer.Size * sizeof(ImDrawIdx));
|
||||
vtx_dst += draw_list->VtxBuffer.Size;
|
||||
idx_dst += draw_list->IdxBuffer.Size;
|
||||
}
|
||||
device->Unmap(bd->pVB, 0);
|
||||
device->Unmap(bd->pIB, 0);
|
||||
|
||||
// Backup DX state that will be modified to restore it afterwards (unfortunately this is very ugly looking and verbose. Close your eyes!)
|
||||
struct BACKUP_DX11_STATE
|
||||
{
|
||||
UINT ScissorRectsCount, ViewportsCount;
|
||||
D3D11_RECT ScissorRects[D3D11_VIEWPORT_AND_SCISSORRECT_OBJECT_COUNT_PER_PIPELINE];
|
||||
D3D11_VIEWPORT Viewports[D3D11_VIEWPORT_AND_SCISSORRECT_OBJECT_COUNT_PER_PIPELINE];
|
||||
ID3D11RasterizerState* RS;
|
||||
ID3D11BlendState* BlendState;
|
||||
FLOAT BlendFactor[4];
|
||||
UINT SampleMask;
|
||||
UINT StencilRef;
|
||||
ID3D11DepthStencilState* DepthStencilState;
|
||||
ID3D11ShaderResourceView* PSShaderResource;
|
||||
ID3D11SamplerState* PSSampler;
|
||||
ID3D11PixelShader* PS;
|
||||
ID3D11VertexShader* VS;
|
||||
ID3D11GeometryShader* GS;
|
||||
UINT PSInstancesCount, VSInstancesCount, GSInstancesCount;
|
||||
ID3D11ClassInstance *PSInstances[256], *VSInstances[256], *GSInstances[256]; // 256 is max according to PSSetShader documentation
|
||||
D3D11_PRIMITIVE_TOPOLOGY PrimitiveTopology;
|
||||
ID3D11Buffer* IndexBuffer, *VertexBuffer, *VSConstantBuffer;
|
||||
UINT IndexBufferOffset, VertexBufferStride, VertexBufferOffset;
|
||||
DXGI_FORMAT IndexBufferFormat;
|
||||
ID3D11InputLayout* InputLayout;
|
||||
};
|
||||
BACKUP_DX11_STATE old = {};
|
||||
old.ScissorRectsCount = old.ViewportsCount = D3D11_VIEWPORT_AND_SCISSORRECT_OBJECT_COUNT_PER_PIPELINE;
|
||||
device->RSGetScissorRects(&old.ScissorRectsCount, old.ScissorRects);
|
||||
device->RSGetViewports(&old.ViewportsCount, old.Viewports);
|
||||
device->RSGetState(&old.RS);
|
||||
device->OMGetBlendState(&old.BlendState, old.BlendFactor, &old.SampleMask);
|
||||
device->OMGetDepthStencilState(&old.DepthStencilState, &old.StencilRef);
|
||||
device->PSGetShaderResources(0, 1, &old.PSShaderResource);
|
||||
device->PSGetSamplers(0, 1, &old.PSSampler);
|
||||
old.PSInstancesCount = old.VSInstancesCount = old.GSInstancesCount = 256;
|
||||
device->PSGetShader(&old.PS, old.PSInstances, &old.PSInstancesCount);
|
||||
device->VSGetShader(&old.VS, old.VSInstances, &old.VSInstancesCount);
|
||||
device->VSGetConstantBuffers(0, 1, &old.VSConstantBuffer);
|
||||
device->GSGetShader(&old.GS, old.GSInstances, &old.GSInstancesCount);
|
||||
|
||||
device->IAGetPrimitiveTopology(&old.PrimitiveTopology);
|
||||
device->IAGetIndexBuffer(&old.IndexBuffer, &old.IndexBufferFormat, &old.IndexBufferOffset);
|
||||
device->IAGetVertexBuffers(0, 1, &old.VertexBuffer, &old.VertexBufferStride, &old.VertexBufferOffset);
|
||||
device->IAGetInputLayout(&old.InputLayout);
|
||||
|
||||
// Setup desired DX state
|
||||
ImGui_ImplDX11_SetupRenderState(draw_data, device);
|
||||
|
||||
// Setup render state structure (for callbacks and custom texture bindings)
|
||||
ImGuiPlatformIO& platform_io = ImGui::GetPlatformIO();
|
||||
ImGui_ImplDX11_RenderState render_state;
|
||||
render_state.Device = bd->pd3dDevice;
|
||||
render_state.DeviceContext = bd->pd3dDeviceContext;
|
||||
render_state.SamplerDefault = bd->pTexSamplerLinear;
|
||||
render_state.VertexConstantBuffer = bd->pVertexConstantBuffer;
|
||||
platform_io.Renderer_RenderState = &render_state;
|
||||
|
||||
// Render command lists
|
||||
// (Because we merged all buffers into a single one, we maintain our own offset into them)
|
||||
int global_idx_offset = 0;
|
||||
int global_vtx_offset = 0;
|
||||
ImVec2 clip_off = draw_data->DisplayPos;
|
||||
ImVec2 clip_scale = draw_data->FramebufferScale;
|
||||
for (const ImDrawList* draw_list : draw_data->CmdLists)
|
||||
{
|
||||
for (int cmd_i = 0; cmd_i < draw_list->CmdBuffer.Size; cmd_i++)
|
||||
{
|
||||
const ImDrawCmd* pcmd = &draw_list->CmdBuffer[cmd_i];
|
||||
if (pcmd->UserCallback != nullptr)
|
||||
{
|
||||
// User callback, registered via ImDrawList::AddCallback()
|
||||
// (ImDrawCallback_ResetRenderState is a special callback value used by the user to request the renderer to reset render state.)
|
||||
if (pcmd->UserCallback == ImDrawCallback_ResetRenderState)
|
||||
ImGui_ImplDX11_SetupRenderState(draw_data, device);
|
||||
else
|
||||
pcmd->UserCallback(draw_list, pcmd);
|
||||
}
|
||||
else
|
||||
{
|
||||
// Project scissor/clipping rectangles into framebuffer space
|
||||
ImVec2 clip_min((pcmd->ClipRect.x - clip_off.x) * clip_scale.x, (pcmd->ClipRect.y - clip_off.y) * clip_scale.y);
|
||||
ImVec2 clip_max((pcmd->ClipRect.z - clip_off.x) * clip_scale.x, (pcmd->ClipRect.w - clip_off.y) * clip_scale.y);
|
||||
if (clip_max.x <= clip_min.x || clip_max.y <= clip_min.y)
|
||||
continue;
|
||||
|
||||
// Apply scissor/clipping rectangle
|
||||
const D3D11_RECT r = { (LONG)clip_min.x, (LONG)clip_min.y, (LONG)clip_max.x, (LONG)clip_max.y };
|
||||
device->RSSetScissorRects(1, &r);
|
||||
|
||||
// Bind texture, Draw
|
||||
ID3D11ShaderResourceView* texture_srv = (ID3D11ShaderResourceView*)pcmd->GetTexID();
|
||||
device->PSSetShaderResources(0, 1, &texture_srv);
|
||||
device->DrawIndexed(pcmd->ElemCount, pcmd->IdxOffset + global_idx_offset, pcmd->VtxOffset + global_vtx_offset);
|
||||
}
|
||||
}
|
||||
global_idx_offset += draw_list->IdxBuffer.Size;
|
||||
global_vtx_offset += draw_list->VtxBuffer.Size;
|
||||
}
|
||||
platform_io.Renderer_RenderState = nullptr;
|
||||
|
||||
// Restore modified DX state
|
||||
device->RSSetScissorRects(old.ScissorRectsCount, old.ScissorRects);
|
||||
device->RSSetViewports(old.ViewportsCount, old.Viewports);
|
||||
device->RSSetState(old.RS); if (old.RS) old.RS->Release();
|
||||
device->OMSetBlendState(old.BlendState, old.BlendFactor, old.SampleMask); if (old.BlendState) old.BlendState->Release();
|
||||
device->OMSetDepthStencilState(old.DepthStencilState, old.StencilRef); if (old.DepthStencilState) old.DepthStencilState->Release();
|
||||
device->PSSetShaderResources(0, 1, &old.PSShaderResource); if (old.PSShaderResource) old.PSShaderResource->Release();
|
||||
device->PSSetSamplers(0, 1, &old.PSSampler); if (old.PSSampler) old.PSSampler->Release();
|
||||
device->PSSetShader(old.PS, old.PSInstances, old.PSInstancesCount); if (old.PS) old.PS->Release();
|
||||
for (UINT i = 0; i < old.PSInstancesCount; i++) if (old.PSInstances[i]) old.PSInstances[i]->Release();
|
||||
device->VSSetShader(old.VS, old.VSInstances, old.VSInstancesCount); if (old.VS) old.VS->Release();
|
||||
device->VSSetConstantBuffers(0, 1, &old.VSConstantBuffer); if (old.VSConstantBuffer) old.VSConstantBuffer->Release();
|
||||
device->GSSetShader(old.GS, old.GSInstances, old.GSInstancesCount); if (old.GS) old.GS->Release();
|
||||
for (UINT i = 0; i < old.VSInstancesCount; i++) if (old.VSInstances[i]) old.VSInstances[i]->Release();
|
||||
device->IASetPrimitiveTopology(old.PrimitiveTopology);
|
||||
device->IASetIndexBuffer(old.IndexBuffer, old.IndexBufferFormat, old.IndexBufferOffset); if (old.IndexBuffer) old.IndexBuffer->Release();
|
||||
device->IASetVertexBuffers(0, 1, &old.VertexBuffer, &old.VertexBufferStride, &old.VertexBufferOffset); if (old.VertexBuffer) old.VertexBuffer->Release();
|
||||
device->IASetInputLayout(old.InputLayout); if (old.InputLayout) old.InputLayout->Release();
|
||||
}
|
||||
|
||||
static void ImGui_ImplDX11_DestroyTexture(ImTextureData* tex)
|
||||
{
|
||||
if (ImGui_ImplDX11_Texture* backend_tex = (ImGui_ImplDX11_Texture*)tex->BackendUserData)
|
||||
{
|
||||
IM_ASSERT(backend_tex->pTextureView == (ID3D11ShaderResourceView*)(intptr_t)tex->TexID);
|
||||
backend_tex->pTextureView->Release();
|
||||
backend_tex->pTexture->Release();
|
||||
IM_DELETE(backend_tex);
|
||||
|
||||
// Clear identifiers and mark as destroyed (in order to allow e.g. calling InvalidateDeviceObjects while running)
|
||||
tex->SetTexID(ImTextureID_Invalid);
|
||||
tex->BackendUserData = nullptr;
|
||||
}
|
||||
tex->SetStatus(ImTextureStatus_Destroyed);
|
||||
}
|
||||
|
||||
void ImGui_ImplDX11_UpdateTexture(ImTextureData* tex)
|
||||
{
|
||||
ImGui_ImplDX11_Data* bd = ImGui_ImplDX11_GetBackendData();
|
||||
if (tex->Status == ImTextureStatus_WantCreate)
|
||||
{
|
||||
// Create and upload new texture to graphics system
|
||||
//IMGUI_DEBUG_LOG("UpdateTexture #%03d: WantCreate %dx%d\n", tex->UniqueID, tex->Width, tex->Height);
|
||||
IM_ASSERT(tex->TexID == ImTextureID_Invalid && tex->BackendUserData == nullptr);
|
||||
IM_ASSERT(tex->Format == ImTextureFormat_RGBA32);
|
||||
unsigned int* pixels = (unsigned int*)tex->GetPixels();
|
||||
ImGui_ImplDX11_Texture* backend_tex = IM_NEW(ImGui_ImplDX11_Texture)();
|
||||
|
||||
// Create texture
|
||||
D3D11_TEXTURE2D_DESC desc;
|
||||
ZeroMemory(&desc, sizeof(desc));
|
||||
desc.Width = (UINT)tex->Width;
|
||||
desc.Height = (UINT)tex->Height;
|
||||
desc.MipLevels = 1;
|
||||
desc.ArraySize = 1;
|
||||
desc.Format = DXGI_FORMAT_R8G8B8A8_UNORM;
|
||||
desc.SampleDesc.Count = 1;
|
||||
desc.Usage = D3D11_USAGE_DEFAULT;
|
||||
desc.BindFlags = D3D11_BIND_SHADER_RESOURCE;
|
||||
desc.CPUAccessFlags = 0;
|
||||
D3D11_SUBRESOURCE_DATA subResource;
|
||||
subResource.pSysMem = pixels;
|
||||
subResource.SysMemPitch = desc.Width * 4;
|
||||
subResource.SysMemSlicePitch = 0;
|
||||
bd->pd3dDevice->CreateTexture2D(&desc, &subResource, &backend_tex->pTexture);
|
||||
IM_ASSERT(backend_tex->pTexture != nullptr && "Backend failed to create texture!");
|
||||
|
||||
// Create texture view
|
||||
D3D11_SHADER_RESOURCE_VIEW_DESC srvDesc;
|
||||
ZeroMemory(&srvDesc, sizeof(srvDesc));
|
||||
srvDesc.Format = DXGI_FORMAT_R8G8B8A8_UNORM;
|
||||
srvDesc.ViewDimension = D3D11_SRV_DIMENSION_TEXTURE2D;
|
||||
srvDesc.Texture2D.MipLevels = desc.MipLevels;
|
||||
srvDesc.Texture2D.MostDetailedMip = 0;
|
||||
bd->pd3dDevice->CreateShaderResourceView(backend_tex->pTexture, &srvDesc, &backend_tex->pTextureView);
|
||||
IM_ASSERT(backend_tex->pTextureView != nullptr && "Backend failed to create texture!");
|
||||
|
||||
// Store identifiers
|
||||
tex->SetTexID((ImTextureID)(intptr_t)backend_tex->pTextureView);
|
||||
tex->SetStatus(ImTextureStatus_OK);
|
||||
tex->BackendUserData = backend_tex;
|
||||
}
|
||||
else if (tex->Status == ImTextureStatus_WantUpdates)
|
||||
{
|
||||
// Update selected blocks. We only ever write to textures regions which have never been used before!
|
||||
// This backend chooses to use tex->Updates[] but you can use tex->UpdateRect to upload a single region.
|
||||
ImGui_ImplDX11_Texture* backend_tex = (ImGui_ImplDX11_Texture*)tex->BackendUserData;
|
||||
IM_ASSERT(backend_tex->pTextureView == (ID3D11ShaderResourceView*)(intptr_t)tex->TexID);
|
||||
for (ImTextureRect& r : tex->Updates)
|
||||
{
|
||||
D3D11_BOX box = { (UINT)r.x, (UINT)r.y, (UINT)0, (UINT)(r.x + r.w), (UINT)(r.y + r.h), (UINT)1 };
|
||||
bd->pd3dDeviceContext->UpdateSubresource(backend_tex->pTexture, 0, &box, tex->GetPixelsAt(r.x, r.y), (UINT)tex->GetPitch(), 0);
|
||||
}
|
||||
tex->SetStatus(ImTextureStatus_OK);
|
||||
}
|
||||
if (tex->Status == ImTextureStatus_WantDestroy && tex->UnusedFrames > 0)
|
||||
ImGui_ImplDX11_DestroyTexture(tex);
|
||||
}
|
||||
|
||||
bool ImGui_ImplDX11_CreateDeviceObjects()
|
||||
{
|
||||
ImGui_ImplDX11_Data* bd = ImGui_ImplDX11_GetBackendData();
|
||||
if (!bd->pd3dDevice)
|
||||
return false;
|
||||
ImGui_ImplDX11_InvalidateDeviceObjects();
|
||||
|
||||
// By using D3DCompile() from <d3dcompiler.h> / d3dcompiler.lib, we introduce a dependency to a given version of d3dcompiler_XX.dll (see D3DCOMPILER_DLL_A)
|
||||
// If you would like to use this DX11 sample code but remove this dependency you can:
|
||||
// 1) compile once, save the compiled shader blobs into a file or source code and pass them to CreateVertexShader()/CreatePixelShader() [preferred solution]
|
||||
// 2) use code to detect any version of the DLL and grab a pointer to D3DCompile from the DLL.
|
||||
// See https://github.com/ocornut/imgui/pull/638 for sources and details.
|
||||
|
||||
// Create the vertex shader
|
||||
{
|
||||
static const char* vertexShader =
|
||||
"cbuffer vertexBuffer : register(b0) \
|
||||
{\
|
||||
float4x4 ProjectionMatrix; \
|
||||
};\
|
||||
struct VS_INPUT\
|
||||
{\
|
||||
float2 pos : POSITION;\
|
||||
float4 col : COLOR0;\
|
||||
float2 uv : TEXCOORD0;\
|
||||
};\
|
||||
\
|
||||
struct PS_INPUT\
|
||||
{\
|
||||
float4 pos : SV_POSITION;\
|
||||
float4 col : COLOR0;\
|
||||
float2 uv : TEXCOORD0;\
|
||||
};\
|
||||
\
|
||||
PS_INPUT main(VS_INPUT input)\
|
||||
{\
|
||||
PS_INPUT output;\
|
||||
output.pos = mul( ProjectionMatrix, float4(input.pos.xy, 0.f, 1.f));\
|
||||
output.col = input.col;\
|
||||
output.uv = input.uv;\
|
||||
return output;\
|
||||
}";
|
||||
|
||||
ID3DBlob* vertexShaderBlob;
|
||||
if (FAILED(D3DCompile(vertexShader, strlen(vertexShader), nullptr, nullptr, nullptr, "main", "vs_4_0", 0, 0, &vertexShaderBlob, nullptr)))
|
||||
return false; // NB: Pass ID3DBlob* pErrorBlob to D3DCompile() to get error showing in (const char*)pErrorBlob->GetBufferPointer(). Make sure to Release() the blob!
|
||||
if (bd->pd3dDevice->CreateVertexShader(vertexShaderBlob->GetBufferPointer(), vertexShaderBlob->GetBufferSize(), nullptr, &bd->pVertexShader) != S_OK)
|
||||
{
|
||||
vertexShaderBlob->Release();
|
||||
return false;
|
||||
}
|
||||
|
||||
// Create the input layout
|
||||
D3D11_INPUT_ELEMENT_DESC local_layout[] =
|
||||
{
|
||||
{ "POSITION", 0, DXGI_FORMAT_R32G32_FLOAT, 0, (UINT)offsetof(ImDrawVert, pos), D3D11_INPUT_PER_VERTEX_DATA, 0 },
|
||||
{ "TEXCOORD", 0, DXGI_FORMAT_R32G32_FLOAT, 0, (UINT)offsetof(ImDrawVert, uv), D3D11_INPUT_PER_VERTEX_DATA, 0 },
|
||||
{ "COLOR", 0, DXGI_FORMAT_R8G8B8A8_UNORM, 0, (UINT)offsetof(ImDrawVert, col), D3D11_INPUT_PER_VERTEX_DATA, 0 },
|
||||
};
|
||||
if (bd->pd3dDevice->CreateInputLayout(local_layout, 3, vertexShaderBlob->GetBufferPointer(), vertexShaderBlob->GetBufferSize(), &bd->pInputLayout) != S_OK)
|
||||
{
|
||||
vertexShaderBlob->Release();
|
||||
return false;
|
||||
}
|
||||
vertexShaderBlob->Release();
|
||||
|
||||
// Create the constant buffer
|
||||
{
|
||||
D3D11_BUFFER_DESC desc = {};
|
||||
desc.ByteWidth = sizeof(VERTEX_CONSTANT_BUFFER_DX11);
|
||||
desc.Usage = D3D11_USAGE_DYNAMIC;
|
||||
desc.BindFlags = D3D11_BIND_CONSTANT_BUFFER;
|
||||
desc.CPUAccessFlags = D3D11_CPU_ACCESS_WRITE;
|
||||
desc.MiscFlags = 0;
|
||||
bd->pd3dDevice->CreateBuffer(&desc, nullptr, &bd->pVertexConstantBuffer);
|
||||
}
|
||||
}
|
||||
|
||||
// Create the pixel shader
|
||||
{
|
||||
static const char* pixelShader =
|
||||
"struct PS_INPUT\
|
||||
{\
|
||||
float4 pos : SV_POSITION;\
|
||||
float4 col : COLOR0;\
|
||||
float2 uv : TEXCOORD0;\
|
||||
};\
|
||||
sampler sampler0;\
|
||||
Texture2D texture0;\
|
||||
\
|
||||
float4 main(PS_INPUT input) : SV_Target\
|
||||
{\
|
||||
float4 out_col = input.col * texture0.Sample(sampler0, input.uv); \
|
||||
return out_col; \
|
||||
}";
|
||||
|
||||
ID3DBlob* pixelShaderBlob;
|
||||
if (FAILED(D3DCompile(pixelShader, strlen(pixelShader), nullptr, nullptr, nullptr, "main", "ps_4_0", 0, 0, &pixelShaderBlob, nullptr)))
|
||||
return false; // NB: Pass ID3DBlob* pErrorBlob to D3DCompile() to get error showing in (const char*)pErrorBlob->GetBufferPointer(). Make sure to Release() the blob!
|
||||
if (bd->pd3dDevice->CreatePixelShader(pixelShaderBlob->GetBufferPointer(), pixelShaderBlob->GetBufferSize(), nullptr, &bd->pPixelShader) != S_OK)
|
||||
{
|
||||
pixelShaderBlob->Release();
|
||||
return false;
|
||||
}
|
||||
pixelShaderBlob->Release();
|
||||
}
|
||||
|
||||
// Create the blending setup
|
||||
{
|
||||
D3D11_BLEND_DESC desc;
|
||||
ZeroMemory(&desc, sizeof(desc));
|
||||
desc.AlphaToCoverageEnable = false;
|
||||
desc.RenderTarget[0].BlendEnable = true;
|
||||
desc.RenderTarget[0].SrcBlend = D3D11_BLEND_SRC_ALPHA;
|
||||
desc.RenderTarget[0].DestBlend = D3D11_BLEND_INV_SRC_ALPHA;
|
||||
desc.RenderTarget[0].BlendOp = D3D11_BLEND_OP_ADD;
|
||||
desc.RenderTarget[0].SrcBlendAlpha = D3D11_BLEND_ONE;
|
||||
desc.RenderTarget[0].DestBlendAlpha = D3D11_BLEND_INV_SRC_ALPHA;
|
||||
desc.RenderTarget[0].BlendOpAlpha = D3D11_BLEND_OP_ADD;
|
||||
desc.RenderTarget[0].RenderTargetWriteMask = D3D11_COLOR_WRITE_ENABLE_ALL;
|
||||
bd->pd3dDevice->CreateBlendState(&desc, &bd->pBlendState);
|
||||
}
|
||||
|
||||
// Create the rasterizer state
|
||||
{
|
||||
D3D11_RASTERIZER_DESC desc;
|
||||
ZeroMemory(&desc, sizeof(desc));
|
||||
desc.FillMode = D3D11_FILL_SOLID;
|
||||
desc.CullMode = D3D11_CULL_NONE;
|
||||
desc.ScissorEnable = true;
|
||||
desc.DepthClipEnable = true;
|
||||
bd->pd3dDevice->CreateRasterizerState(&desc, &bd->pRasterizerState);
|
||||
}
|
||||
|
||||
// Create depth-stencil State
|
||||
{
|
||||
D3D11_DEPTH_STENCIL_DESC desc;
|
||||
ZeroMemory(&desc, sizeof(desc));
|
||||
desc.DepthEnable = false;
|
||||
desc.DepthWriteMask = D3D11_DEPTH_WRITE_MASK_ALL;
|
||||
desc.DepthFunc = D3D11_COMPARISON_ALWAYS;
|
||||
desc.StencilEnable = false;
|
||||
desc.FrontFace.StencilFailOp = desc.FrontFace.StencilDepthFailOp = desc.FrontFace.StencilPassOp = D3D11_STENCIL_OP_KEEP;
|
||||
desc.FrontFace.StencilFunc = D3D11_COMPARISON_ALWAYS;
|
||||
desc.BackFace = desc.FrontFace;
|
||||
bd->pd3dDevice->CreateDepthStencilState(&desc, &bd->pDepthStencilState);
|
||||
}
|
||||
|
||||
// Create texture sampler
|
||||
// (Bilinear sampling is required by default. Set 'io.Fonts->Flags |= ImFontAtlasFlags_NoBakedLines' or 'style.AntiAliasedLinesUseTex = false' to allow point/nearest sampling)
|
||||
{
|
||||
D3D11_SAMPLER_DESC desc;
|
||||
ZeroMemory(&desc, sizeof(desc));
|
||||
desc.Filter = D3D11_FILTER_MIN_MAG_MIP_LINEAR;
|
||||
desc.AddressU = D3D11_TEXTURE_ADDRESS_CLAMP;
|
||||
desc.AddressV = D3D11_TEXTURE_ADDRESS_CLAMP;
|
||||
desc.AddressW = D3D11_TEXTURE_ADDRESS_CLAMP;
|
||||
desc.MipLODBias = 0.f;
|
||||
desc.ComparisonFunc = D3D11_COMPARISON_ALWAYS;
|
||||
desc.MinLOD = 0.f;
|
||||
desc.MaxLOD = 0.f;
|
||||
bd->pd3dDevice->CreateSamplerState(&desc, &bd->pTexSamplerLinear);
|
||||
}
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
void ImGui_ImplDX11_InvalidateDeviceObjects()
|
||||
{
|
||||
ImGui_ImplDX11_Data* bd = ImGui_ImplDX11_GetBackendData();
|
||||
if (!bd->pd3dDevice)
|
||||
return;
|
||||
|
||||
// Destroy all textures
|
||||
for (ImTextureData* tex : ImGui::GetPlatformIO().Textures)
|
||||
if (tex->RefCount == 1)
|
||||
ImGui_ImplDX11_DestroyTexture(tex);
|
||||
|
||||
if (bd->pTexSamplerLinear) { bd->pTexSamplerLinear->Release(); bd->pTexSamplerLinear = nullptr; }
|
||||
if (bd->pIB) { bd->pIB->Release(); bd->pIB = nullptr; }
|
||||
if (bd->pVB) { bd->pVB->Release(); bd->pVB = nullptr; }
|
||||
if (bd->pBlendState) { bd->pBlendState->Release(); bd->pBlendState = nullptr; }
|
||||
if (bd->pDepthStencilState) { bd->pDepthStencilState->Release(); bd->pDepthStencilState = nullptr; }
|
||||
if (bd->pRasterizerState) { bd->pRasterizerState->Release(); bd->pRasterizerState = nullptr; }
|
||||
if (bd->pPixelShader) { bd->pPixelShader->Release(); bd->pPixelShader = nullptr; }
|
||||
if (bd->pVertexConstantBuffer) { bd->pVertexConstantBuffer->Release(); bd->pVertexConstantBuffer = nullptr; }
|
||||
if (bd->pInputLayout) { bd->pInputLayout->Release(); bd->pInputLayout = nullptr; }
|
||||
if (bd->pVertexShader) { bd->pVertexShader->Release(); bd->pVertexShader = nullptr; }
|
||||
}
|
||||
|
||||
bool ImGui_ImplDX11_Init(ID3D11Device* device, ID3D11DeviceContext* device_context)
|
||||
{
|
||||
ImGuiIO& io = ImGui::GetIO();
|
||||
IMGUI_CHECKVERSION();
|
||||
IM_ASSERT(io.BackendRendererUserData == nullptr && "Already initialized a renderer backend!");
|
||||
|
||||
// Setup backend capabilities flags
|
||||
ImGui_ImplDX11_Data* bd = IM_NEW(ImGui_ImplDX11_Data)();
|
||||
io.BackendRendererUserData = (void*)bd;
|
||||
io.BackendRendererName = "imgui_impl_dx11";
|
||||
io.BackendFlags |= ImGuiBackendFlags_RendererHasVtxOffset; // We can honor the ImDrawCmd::VtxOffset field, allowing for large meshes.
|
||||
io.BackendFlags |= ImGuiBackendFlags_RendererHasTextures; // We can honor ImGuiPlatformIO::Textures[] requests during render.
|
||||
io.BackendFlags |= ImGuiBackendFlags_RendererHasViewports; // We can create multi-viewports on the Renderer side (optional)
|
||||
|
||||
ImGuiPlatformIO& platform_io = ImGui::GetPlatformIO();
|
||||
platform_io.Renderer_TextureMaxWidth = platform_io.Renderer_TextureMaxHeight = D3D11_REQ_TEXTURE2D_U_OR_V_DIMENSION;
|
||||
|
||||
// Get factory from device
|
||||
IDXGIDevice* pDXGIDevice = nullptr;
|
||||
IDXGIAdapter* pDXGIAdapter = nullptr;
|
||||
IDXGIFactory* pFactory = nullptr;
|
||||
|
||||
if (device->QueryInterface(IID_PPV_ARGS(&pDXGIDevice)) == S_OK)
|
||||
if (pDXGIDevice->GetParent(IID_PPV_ARGS(&pDXGIAdapter)) == S_OK)
|
||||
if (pDXGIAdapter->GetParent(IID_PPV_ARGS(&pFactory)) == S_OK)
|
||||
{
|
||||
bd->pd3dDevice = device;
|
||||
bd->pd3dDeviceContext = device_context;
|
||||
bd->pFactory = pFactory;
|
||||
}
|
||||
if (pDXGIDevice) pDXGIDevice->Release();
|
||||
if (pDXGIAdapter) pDXGIAdapter->Release();
|
||||
bd->pd3dDevice->AddRef();
|
||||
bd->pd3dDeviceContext->AddRef();
|
||||
|
||||
ImGui_ImplDX11_InitMultiViewportSupport();
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
void ImGui_ImplDX11_Shutdown()
|
||||
{
|
||||
ImGui_ImplDX11_Data* bd = ImGui_ImplDX11_GetBackendData();
|
||||
IM_ASSERT(bd != nullptr && "No renderer backend to shutdown, or already shutdown?");
|
||||
ImGuiIO& io = ImGui::GetIO();
|
||||
ImGuiPlatformIO& platform_io = ImGui::GetPlatformIO();
|
||||
|
||||
ImGui_ImplDX11_ShutdownMultiViewportSupport();
|
||||
ImGui_ImplDX11_InvalidateDeviceObjects();
|
||||
if (bd->pFactory) { bd->pFactory->Release(); }
|
||||
if (bd->pd3dDevice) { bd->pd3dDevice->Release(); }
|
||||
if (bd->pd3dDeviceContext) { bd->pd3dDeviceContext->Release(); }
|
||||
|
||||
io.BackendRendererName = nullptr;
|
||||
io.BackendRendererUserData = nullptr;
|
||||
io.BackendFlags &= ~(ImGuiBackendFlags_RendererHasVtxOffset | ImGuiBackendFlags_RendererHasTextures | ImGuiBackendFlags_RendererHasViewports);
|
||||
platform_io.ClearRendererHandlers();
|
||||
IM_DELETE(bd);
|
||||
}
|
||||
|
||||
void ImGui_ImplDX11_NewFrame()
|
||||
{
|
||||
ImGui_ImplDX11_Data* bd = ImGui_ImplDX11_GetBackendData();
|
||||
IM_ASSERT(bd != nullptr && "Context or backend not initialized! Did you call ImGui_ImplDX11_Init()?");
|
||||
|
||||
if (!bd->pVertexShader)
|
||||
if (!ImGui_ImplDX11_CreateDeviceObjects())
|
||||
IM_ASSERT(0 && "ImGui_ImplDX11_CreateDeviceObjects() failed!");
|
||||
}
|
||||
|
||||
//--------------------------------------------------------------------------------------------------------
|
||||
// MULTI-VIEWPORT / PLATFORM INTERFACE SUPPORT
|
||||
// This is an _advanced_ and _optional_ feature, allowing the backend to create and handle multiple viewports simultaneously.
|
||||
// If you are new to dear imgui or creating a new binding for dear imgui, it is recommended that you completely ignore this section first..
|
||||
//--------------------------------------------------------------------------------------------------------
|
||||
|
||||
// Helper structure we store in the void* RendererUserData field of each ImGuiViewport to easily retrieve our backend data.
|
||||
struct ImGui_ImplDX11_ViewportData
|
||||
{
|
||||
IDXGISwapChain* SwapChain;
|
||||
ID3D11RenderTargetView* RTView;
|
||||
|
||||
ImGui_ImplDX11_ViewportData() { SwapChain = nullptr; RTView = nullptr; }
|
||||
~ImGui_ImplDX11_ViewportData() { IM_ASSERT(SwapChain == nullptr && RTView == nullptr); }
|
||||
};
|
||||
|
||||
// Multi-Viewports: configure templates used when creating swapchains for secondary viewports. Will try them in order.
|
||||
// This is intentionally not declared in the .h file yet, so you will need to copy this declaration:
|
||||
void ImGui_ImplDX11_SetSwapChainDescs(const DXGI_SWAP_CHAIN_DESC* desc_templates, int desc_templates_count);
|
||||
void ImGui_ImplDX11_SetSwapChainDescs(const DXGI_SWAP_CHAIN_DESC* desc_templates, int desc_templates_count)
|
||||
{
|
||||
ImGui_ImplDX11_Data* bd = ImGui_ImplDX11_GetBackendData();
|
||||
bd->SwapChainDescsForViewports.resize(desc_templates_count);
|
||||
memcpy(bd->SwapChainDescsForViewports.Data, desc_templates, sizeof(DXGI_SWAP_CHAIN_DESC) * desc_templates_count);
|
||||
}
|
||||
|
||||
static void ImGui_ImplDX11_CreateWindow(ImGuiViewport* viewport)
|
||||
{
|
||||
ImGui_ImplDX11_Data* bd = ImGui_ImplDX11_GetBackendData();
|
||||
ImGui_ImplDX11_ViewportData* vd = IM_NEW(ImGui_ImplDX11_ViewportData)();
|
||||
viewport->RendererUserData = vd;
|
||||
|
||||
// PlatformHandleRaw should always be a HWND, whereas PlatformHandle might be a higher-level handle (e.g. GLFWWindow*, SDL's WindowID).
|
||||
// Some backends will leave PlatformHandleRaw == 0, in which case we assume PlatformHandle will contain the HWND.
|
||||
HWND hwnd = viewport->PlatformHandleRaw ? (HWND)viewport->PlatformHandleRaw : (HWND)viewport->PlatformHandle;
|
||||
IM_ASSERT(hwnd != 0);
|
||||
IM_ASSERT(vd->SwapChain == nullptr && vd->RTView == nullptr);
|
||||
|
||||
// Create swap chain
|
||||
HRESULT hr = DXGI_ERROR_UNSUPPORTED;
|
||||
for (const DXGI_SWAP_CHAIN_DESC& sd_template : bd->SwapChainDescsForViewports)
|
||||
{
|
||||
IM_ASSERT(sd_template.BufferDesc.Width == 0 && sd_template.BufferDesc.Height == 0 && sd_template.OutputWindow == nullptr);
|
||||
DXGI_SWAP_CHAIN_DESC sd = sd_template;
|
||||
sd.BufferDesc.Width = (UINT)viewport->Size.x;
|
||||
sd.BufferDesc.Height = (UINT)viewport->Size.y;
|
||||
sd.OutputWindow = hwnd;
|
||||
hr = bd->pFactory->CreateSwapChain(bd->pd3dDevice, &sd, &vd->SwapChain);
|
||||
if (SUCCEEDED(hr))
|
||||
break;
|
||||
}
|
||||
IM_ASSERT(SUCCEEDED(hr));
|
||||
bd->pFactory->MakeWindowAssociation(hwnd, DXGI_MWA_NO_ALT_ENTER | DXGI_MWA_NO_WINDOW_CHANGES); // Disable e.g. Alt+Enter
|
||||
|
||||
// Create the render target
|
||||
if (vd->SwapChain != nullptr)
|
||||
{
|
||||
ID3D11Texture2D* pBackBuffer;
|
||||
vd->SwapChain->GetBuffer(0, IID_PPV_ARGS(&pBackBuffer));
|
||||
bd->pd3dDevice->CreateRenderTargetView(pBackBuffer, nullptr, &vd->RTView);
|
||||
pBackBuffer->Release();
|
||||
}
|
||||
}
|
||||
|
||||
static void ImGui_ImplDX11_DestroyWindow(ImGuiViewport* viewport)
|
||||
{
|
||||
// The main viewport (owned by the application) will always have RendererUserData == nullptr since we didn't create the data for it.
|
||||
if (ImGui_ImplDX11_ViewportData* vd = (ImGui_ImplDX11_ViewportData*)viewport->RendererUserData)
|
||||
{
|
||||
if (vd->SwapChain)
|
||||
vd->SwapChain->Release();
|
||||
vd->SwapChain = nullptr;
|
||||
if (vd->RTView)
|
||||
vd->RTView->Release();
|
||||
vd->RTView = nullptr;
|
||||
IM_DELETE(vd);
|
||||
}
|
||||
viewport->RendererUserData = nullptr;
|
||||
}
|
||||
|
||||
static void ImGui_ImplDX11_SetWindowSize(ImGuiViewport* viewport, ImVec2 size)
|
||||
{
|
||||
ImGui_ImplDX11_Data* bd = ImGui_ImplDX11_GetBackendData();
|
||||
ImGui_ImplDX11_ViewportData* vd = (ImGui_ImplDX11_ViewportData*)viewport->RendererUserData;
|
||||
if (vd->RTView)
|
||||
{
|
||||
vd->RTView->Release();
|
||||
vd->RTView = nullptr;
|
||||
}
|
||||
if (vd->SwapChain)
|
||||
{
|
||||
ID3D11Texture2D* pBackBuffer = nullptr;
|
||||
vd->SwapChain->ResizeBuffers(0, (UINT)size.x, (UINT)size.y, DXGI_FORMAT_UNKNOWN, 0);
|
||||
vd->SwapChain->GetBuffer(0, IID_PPV_ARGS(&pBackBuffer));
|
||||
if (pBackBuffer == nullptr) { fprintf(stderr, "ImGui_ImplDX11_SetWindowSize() failed creating buffers.\n"); return; }
|
||||
bd->pd3dDevice->CreateRenderTargetView(pBackBuffer, nullptr, &vd->RTView);
|
||||
pBackBuffer->Release();
|
||||
}
|
||||
}
|
||||
|
||||
static void ImGui_ImplDX11_RenderWindow(ImGuiViewport* viewport, void*)
|
||||
{
|
||||
ImGui_ImplDX11_Data* bd = ImGui_ImplDX11_GetBackendData();
|
||||
ImGui_ImplDX11_ViewportData* vd = (ImGui_ImplDX11_ViewportData*)viewport->RendererUserData;
|
||||
ImVec4 clear_color = ImVec4(0.0f, 0.0f, 0.0f, 1.0f);
|
||||
bd->pd3dDeviceContext->OMSetRenderTargets(1, &vd->RTView, nullptr);
|
||||
if (!(viewport->Flags & ImGuiViewportFlags_NoRendererClear))
|
||||
bd->pd3dDeviceContext->ClearRenderTargetView(vd->RTView, (float*)&clear_color);
|
||||
ImGui_ImplDX11_RenderDrawData(viewport->DrawData);
|
||||
}
|
||||
|
||||
static void ImGui_ImplDX11_SwapBuffers(ImGuiViewport* viewport, void*)
|
||||
{
|
||||
ImGui_ImplDX11_ViewportData* vd = (ImGui_ImplDX11_ViewportData*)viewport->RendererUserData;
|
||||
if (vd->SwapChain)
|
||||
vd->SwapChain->Present(0, 0); // Present without vsync
|
||||
}
|
||||
|
||||
static void ImGui_ImplDX11_InitMultiViewportSupport()
|
||||
{
|
||||
ImGuiPlatformIO& platform_io = ImGui::GetPlatformIO();
|
||||
platform_io.Renderer_CreateWindow = ImGui_ImplDX11_CreateWindow;
|
||||
platform_io.Renderer_DestroyWindow = ImGui_ImplDX11_DestroyWindow;
|
||||
platform_io.Renderer_SetWindowSize = ImGui_ImplDX11_SetWindowSize;
|
||||
platform_io.Renderer_RenderWindow = ImGui_ImplDX11_RenderWindow;
|
||||
platform_io.Renderer_SwapBuffers = ImGui_ImplDX11_SwapBuffers;
|
||||
|
||||
// Default swapchain format
|
||||
DXGI_SWAP_CHAIN_DESC sd;
|
||||
ZeroMemory(&sd, sizeof(sd));
|
||||
sd.BufferDesc.Format = DXGI_FORMAT_R8G8B8A8_UNORM;
|
||||
sd.SampleDesc.Count = 1;
|
||||
sd.SampleDesc.Quality = 0;
|
||||
sd.BufferUsage = DXGI_USAGE_RENDER_TARGET_OUTPUT;
|
||||
sd.BufferCount = 1;
|
||||
sd.Windowed = TRUE;
|
||||
sd.SwapEffect = DXGI_SWAP_EFFECT_DISCARD;
|
||||
sd.Flags = 0;
|
||||
ImGui_ImplDX11_SetSwapChainDescs(&sd, 1);
|
||||
}
|
||||
|
||||
static void ImGui_ImplDX11_ShutdownMultiViewportSupport()
|
||||
{
|
||||
ImGui::DestroyPlatformWindows();
|
||||
}
|
||||
|
||||
//-----------------------------------------------------------------------------
|
||||
|
||||
#endif // #ifndef IMGUI_DISABLE
|
||||
52
lib/third_party/dear-imgui/imgui_impl_dx11.h
vendored
Normal file
@ -0,0 +1,52 @@
|
||||
// dear imgui: Renderer Backend for DirectX11
|
||||
// This needs to be used along with a Platform Backend (e.g. Win32)
|
||||
|
||||
// Implemented features:
|
||||
// [X] Renderer: User texture binding. Use 'ID3D11ShaderResourceView*' as texture identifier. Read the FAQ about ImTextureID/ImTextureRef!
|
||||
// [X] Renderer: Large meshes support (64k+ vertices) even with 16-bit indices (ImGuiBackendFlags_RendererHasVtxOffset).
|
||||
// [X] Renderer: Texture updates support for dynamic font atlas (ImGuiBackendFlags_RendererHasTextures).
|
||||
// [X] Renderer: Expose selected render state for draw callbacks to use. Access in '(ImGui_ImplXXXX_RenderState*)GetPlatformIO().Renderer_RenderState'.
|
||||
// [X] Renderer: Multi-viewport support (multiple windows). Enable with 'io.ConfigFlags |= ImGuiConfigFlags_ViewportsEnable'.
|
||||
|
||||
// You can use unmodified imgui_impl_* files in your project. See examples/ folder for examples of using this.
|
||||
// Prefer including the entire imgui/ repository into your project (either as a copy or as a submodule), and only build the backends you need.
|
||||
// Learn about Dear ImGui:
|
||||
// - FAQ https://dearimgui.com/faq
|
||||
// - Getting Started https://dearimgui.com/getting-started
|
||||
// - Documentation https://dearimgui.com/docs (same as your local docs/ folder).
|
||||
// - Introduction, links and more at the top of imgui.cpp
|
||||
|
||||
#pragma once
|
||||
#include "imgui.h" // IMGUI_IMPL_API
|
||||
#ifndef IMGUI_DISABLE
|
||||
|
||||
struct ID3D11Device;
|
||||
struct ID3D11DeviceContext;
|
||||
struct ID3D11SamplerState;
|
||||
struct ID3D11Buffer;
|
||||
|
||||
// Follow "Getting Started" link and check examples/ folder to learn about using backends!
|
||||
IMGUI_IMPL_API bool ImGui_ImplDX11_Init(ID3D11Device* device, ID3D11DeviceContext* device_context);
|
||||
IMGUI_IMPL_API void ImGui_ImplDX11_Shutdown();
|
||||
IMGUI_IMPL_API void ImGui_ImplDX11_NewFrame();
|
||||
IMGUI_IMPL_API void ImGui_ImplDX11_RenderDrawData(ImDrawData* draw_data);
|
||||
|
||||
// Use if you want to reset your rendering device without losing Dear ImGui state.
|
||||
IMGUI_IMPL_API bool ImGui_ImplDX11_CreateDeviceObjects();
|
||||
IMGUI_IMPL_API void ImGui_ImplDX11_InvalidateDeviceObjects();
|
||||
|
||||
// (Advanced) Use e.g. if you need to precisely control the timing of texture updates (e.g. for staged rendering), by setting ImDrawData::Textures = NULL to handle this manually.
|
||||
IMGUI_IMPL_API void ImGui_ImplDX11_UpdateTexture(ImTextureData* tex);
|
||||
|
||||
// [BETA] Selected render state data shared with callbacks.
|
||||
// This is temporarily stored in GetPlatformIO().Renderer_RenderState during the ImGui_ImplDX11_RenderDrawData() call.
|
||||
// (Please open an issue if you feel you need access to more data)
|
||||
struct ImGui_ImplDX11_RenderState
|
||||
{
|
||||
ID3D11Device* Device;
|
||||
ID3D11DeviceContext* DeviceContext;
|
||||
ID3D11SamplerState* SamplerDefault;
|
||||
ID3D11Buffer* VertexConstantBuffer;
|
||||
};
|
||||
|
||||
#endif // #ifndef IMGUI_DISABLE
|
||||
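// Illustrative sketch (not part of the vendored backend): the CreateDeviceObjects /
// InvalidateDeviceObjects pair declared above is intended for resetting the rendering device
// without losing Dear ImGui state. A hedged example of how an application might wrap that:
static void Example_Reset_DX11_Backend_Objects() {
    // Release the backend's device-dependent objects (font texture, shaders, buffers)...
    ImGui_ImplDX11_InvalidateDeviceObjects();
    // ...perform the application-side device / swap-chain reset here (not shown)...
    // ...then rebuild them. ImGui_ImplDX11_NewFrame() would also recreate them lazily.
    ImGui_ImplDX11_CreateDeviceObjects();
}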
1476
lib/third_party/dear-imgui/imgui_impl_win32.cpp
vendored
Normal file
File diff suppressed because it is too large
54
lib/third_party/dear-imgui/imgui_impl_win32.h
vendored
Normal file
@ -0,0 +1,54 @@
|
||||
// dear imgui: Platform Backend for Windows (standard windows API for 32-bits AND 64-bits applications)
|
||||
// This needs to be used along with a Renderer (e.g. DirectX11, OpenGL3, Vulkan..)
|
||||
|
||||
// Implemented features:
|
||||
// [X] Platform: Clipboard support (for Win32 this is actually part of core dear imgui)
|
||||
// [X] Platform: Mouse support. Can discriminate Mouse/TouchScreen/Pen.
|
||||
// [X] Platform: Keyboard support. Since 1.87 we are using the io.AddKeyEvent() function. Pass ImGuiKey values to all key functions e.g. ImGui::IsKeyPressed(ImGuiKey_Space). [Legacy VK_* values are obsolete since 1.87 and not supported since 1.91.5]
|
||||
// [X] Platform: Gamepad support.
|
||||
// [X] Platform: Mouse cursor shape and visibility (ImGuiBackendFlags_HasMouseCursors). Disable with 'io.ConfigFlags |= ImGuiConfigFlags_NoMouseCursorChange'.
|
||||
// [X] Platform: Multi-viewport support (multiple windows). Enable with 'io.ConfigFlags |= ImGuiConfigFlags_ViewportsEnable'.
|
||||
|
||||
// You can use unmodified imgui_impl_* files in your project. See examples/ folder for examples of using this.
|
||||
// Prefer including the entire imgui/ repository into your project (either as a copy or as a submodule), and only build the backends you need.
|
||||
// Learn about Dear ImGui:
|
||||
// - FAQ https://dearimgui.com/faq
|
||||
// - Getting Started https://dearimgui.com/getting-started
|
||||
// - Documentation https://dearimgui.com/docs (same as your local docs/ folder).
|
||||
// - Introduction, links and more at the top of imgui.cpp
|
||||
|
||||
#pragma once
|
||||
#include "imgui.h" // IMGUI_IMPL_API
|
||||
#ifndef IMGUI_DISABLE
|
||||
|
||||
// Follow "Getting Started" link and check examples/ folder to learn about using backends!
|
||||
IMGUI_IMPL_API bool ImGui_ImplWin32_Init(void* hwnd);
|
||||
IMGUI_IMPL_API bool ImGui_ImplWin32_InitForOpenGL(void* hwnd);
|
||||
IMGUI_IMPL_API void ImGui_ImplWin32_Shutdown();
|
||||
IMGUI_IMPL_API void ImGui_ImplWin32_NewFrame();
|
||||
|
||||
// Win32 message handler your application need to call.
|
||||
// - Intentionally commented out in a '#if 0' block to avoid dragging dependencies on <windows.h> from this helper.
|
||||
// - You should COPY the line below into your .cpp code to forward declare the function and then you can call it.
|
||||
// - Call from your application's message handler. Keep calling your message handler unless this function returns TRUE.
|
||||
|
||||
#if 0
|
||||
extern IMGUI_IMPL_API LRESULT ImGui_ImplWin32_WndProcHandler(HWND hWnd, UINT msg, WPARAM wParam, LPARAM lParam);
|
||||
#endif
|
||||
|
||||
// DPI-related helpers (optional)
|
||||
// - Use to enable DPI awareness without having to create an application manifest.
|
||||
// - Your own app may already do this via a manifest or explicit calls. This is mostly useful for our examples/ apps.
|
||||
// - In theory we could call simple functions from Windows SDK such as SetProcessDPIAware(), SetProcessDpiAwareness(), etc.
|
||||
// but most of the functions provided by Microsoft require Windows 8.1/10+ SDK at compile time and Windows 8/10+ at runtime,
|
||||
// neither we want to require the user to have. So we dynamically select and load those functions to avoid dependencies.
|
||||
IMGUI_IMPL_API void ImGui_ImplWin32_EnableDpiAwareness();
|
||||
IMGUI_IMPL_API float ImGui_ImplWin32_GetDpiScaleForHwnd(void* hwnd); // HWND hwnd
|
||||
IMGUI_IMPL_API float ImGui_ImplWin32_GetDpiScaleForMonitor(void* monitor); // HMONITOR monitor
|
||||
|
||||
// Transparency related helpers (optional) [experimental]
|
||||
// - Use to enable alpha compositing transparency with the desktop.
|
||||
// - Use together with e.g. clearing your framebuffer with zero-alpha.
|
||||
IMGUI_IMPL_API void ImGui_ImplWin32_EnableAlphaCompositing(void* hwnd); // HWND hwnd
|
||||
|
||||
#endif // #ifndef IMGUI_DISABLE
|
||||
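// Illustrative sketch (these helpers are not used elsewhere in this PR): pairing the alpha
// compositing helper declared above with a zero-alpha clear, as the comment suggests. The
// hwnd / context / rtv parameters are assumed to come from the application's own D3D11 setup.
static void Example_Enable_Transparent_Window(HWND hwnd, ID3D11DeviceContext* context, ID3D11RenderTargetView* rtv) {
    ImGui_ImplWin32_EnableAlphaCompositing(hwnd);                  // enable desktop compositing for this window
    const float zero_alpha_clear[4] = { 0.0f, 0.0f, 0.0f, 0.0f };  // zero alpha lets the desktop show through
    context->ClearRenderTargetView(rtv, zero_alpha_clear);
}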
4250
lib/third_party/dear-imgui/imgui_internal.h
vendored
Normal file
File diff suppressed because it is too large
4569
lib/third_party/dear-imgui/imgui_tables.cpp
vendored
Normal file
File diff suppressed because it is too large
10912
lib/third_party/dear-imgui/imgui_widgets.cpp
vendored
Normal file
File diff suppressed because it is too large
627
lib/third_party/dear-imgui/imstb_rectpack.h
vendored
Normal file
@ -0,0 +1,627 @@
|
||||
// [DEAR IMGUI]
|
||||
// This is a slightly modified version of stb_rect_pack.h 1.01.
|
||||
// Grep for [DEAR IMGUI] to find the changes.
|
||||
//
|
||||
// stb_rect_pack.h - v1.01 - public domain - rectangle packing
|
||||
// Sean Barrett 2014
|
||||
//
|
||||
// Useful for e.g. packing rectangular textures into an atlas.
|
||||
// Does not do rotation.
|
||||
//
|
||||
// Before #including,
|
||||
//
|
||||
// #define STB_RECT_PACK_IMPLEMENTATION
|
||||
//
|
||||
// in the file that you want to have the implementation.
|
||||
//
|
||||
// Not necessarily the awesomest packing method, but better than
|
||||
// the totally naive one in stb_truetype (which is primarily what
|
||||
// this is meant to replace).
|
||||
//
|
||||
// Has only had a few tests run, may have issues.
|
||||
//
|
||||
// More docs to come.
|
||||
//
|
||||
// No memory allocations; uses qsort() and assert() from stdlib.
|
||||
// Can override those by defining STBRP_SORT and STBRP_ASSERT.
|
||||
//
|
||||
// This library currently uses the Skyline Bottom-Left algorithm.
|
||||
//
|
||||
// Please note: better rectangle packers are welcome! Please
|
||||
// implement them to the same API, but with a different init
|
||||
// function.
|
||||
//
|
||||
// Credits
|
||||
//
|
||||
// Library
|
||||
// Sean Barrett
|
||||
// Minor features
|
||||
// Martins Mozeiko
|
||||
// github:IntellectualKitty
|
||||
//
|
||||
// Bugfixes / warning fixes
|
||||
// Jeremy Jaussaud
|
||||
// Fabian Giesen
|
||||
//
|
||||
// Version history:
|
||||
//
|
||||
// 1.01 (2021-07-11) always use large rect mode, expose STBRP__MAXVAL in public section
|
||||
// 1.00 (2019-02-25) avoid small space waste; gracefully fail too-wide rectangles
|
||||
// 0.99 (2019-02-07) warning fixes
|
||||
// 0.11 (2017-03-03) return packing success/fail result
|
||||
// 0.10 (2016-10-25) remove cast-away-const to avoid warnings
|
||||
// 0.09 (2016-08-27) fix compiler warnings
|
||||
// 0.08 (2015-09-13) really fix bug with empty rects (w=0 or h=0)
|
||||
// 0.07 (2015-09-13) fix bug with empty rects (w=0 or h=0)
|
||||
// 0.06 (2015-04-15) added STBRP_SORT to allow replacing qsort
|
||||
// 0.05: added STBRP_ASSERT to allow replacing assert
|
||||
// 0.04: fixed minor bug in STBRP_LARGE_RECTS support
|
||||
// 0.01: initial release
|
||||
//
|
||||
// LICENSE
|
||||
//
|
||||
// See end of file for license information.
|
||||
|
||||
//////////////////////////////////////////////////////////////////////////////
|
||||
//
|
||||
// INCLUDE SECTION
|
||||
//
|
||||
|
||||
#ifndef STB_INCLUDE_STB_RECT_PACK_H
|
||||
#define STB_INCLUDE_STB_RECT_PACK_H
|
||||
|
||||
#define STB_RECT_PACK_VERSION 1
|
||||
|
||||
#ifdef STBRP_STATIC
|
||||
#define STBRP_DEF static
|
||||
#else
|
||||
#define STBRP_DEF extern
|
||||
#endif
|
||||
|
||||
#ifdef __cplusplus
|
||||
extern "C" {
|
||||
#endif
|
||||
|
||||
typedef struct stbrp_context stbrp_context;
|
||||
typedef struct stbrp_node stbrp_node;
|
||||
typedef struct stbrp_rect stbrp_rect;
|
||||
|
||||
typedef int stbrp_coord;
|
||||
|
||||
#define STBRP__MAXVAL 0x7fffffff
|
||||
// Mostly for internal use, but this is the maximum supported coordinate value.
|
||||
|
||||
STBRP_DEF int stbrp_pack_rects (stbrp_context *context, stbrp_rect *rects, int num_rects);
|
||||
// Assign packed locations to rectangles. The rectangles are of type
|
||||
// 'stbrp_rect' defined below, stored in the array 'rects', and there
|
||||
// are 'num_rects' many of them.
|
||||
//
|
||||
// Rectangles which are successfully packed have the 'was_packed' flag
|
||||
// set to a non-zero value and 'x' and 'y' store the minimum location
|
||||
// on each axis (i.e. bottom-left in cartesian coordinates, top-left
|
||||
// if you imagine y increasing downwards). Rectangles which do not fit
|
||||
// have the 'was_packed' flag set to 0.
|
||||
//
|
||||
// You should not try to access the 'rects' array from another thread
|
||||
// while this function is running, as the function temporarily reorders
|
||||
// the array while it executes.
|
||||
//
|
||||
// To pack into another rectangle, you need to call stbrp_init_target
|
||||
// again. To continue packing into the same rectangle, you can call
|
||||
// this function again. Calling this multiple times with multiple rect
|
||||
// arrays will probably produce worse packing results than calling it
|
||||
// a single time with the full rectangle array, but the option is
|
||||
// available.
|
||||
//
|
||||
// The function returns 1 if all of the rectangles were successfully
|
||||
// packed and 0 otherwise.
|
||||
|
||||
struct stbrp_rect
|
||||
{
|
||||
// reserved for your use:
|
||||
int id;
|
||||
|
||||
// input:
|
||||
stbrp_coord w, h;
|
||||
|
||||
// output:
|
||||
stbrp_coord x, y;
|
||||
int was_packed; // non-zero if valid packing
|
||||
|
||||
}; // 16 bytes, nominally
|
||||
|
||||
|
||||
STBRP_DEF void stbrp_init_target (stbrp_context *context, int width, int height, stbrp_node *nodes, int num_nodes);
|
||||
// Initialize a rectangle packer to:
|
||||
// pack a rectangle that is 'width' by 'height' in dimensions
|
||||
// using temporary storage provided by the array 'nodes', which is 'num_nodes' long
|
||||
//
|
||||
// You must call this function every time you start packing into a new target.
|
||||
//
|
||||
// There is no "shutdown" function. The 'nodes' memory must stay valid for
|
||||
// the following stbrp_pack_rects() call (or calls), but can be freed after
|
||||
// the call (or calls) finish.
|
||||
//
|
||||
// Note: to guarantee best results, either:
|
||||
// 1. make sure 'num_nodes' >= 'width'
|
||||
// or 2. call stbrp_allow_out_of_mem() defined below with 'allow_out_of_mem = 1'
|
||||
//
|
||||
// If you don't do either of the above things, widths will be quantized to multiples
|
||||
// of small integers to guarantee the algorithm doesn't run out of temporary storage.
|
||||
//
|
||||
// If you do #2, then the non-quantized algorithm will be used, but the algorithm
|
||||
// may run out of temporary storage and be unable to pack some rectangles.
|
||||
|
||||
STBRP_DEF void stbrp_setup_allow_out_of_mem (stbrp_context *context, int allow_out_of_mem);
|
||||
// Optionally call this function after init but before doing any packing to
|
||||
// change the handling of the out-of-temp-memory scenario, described above.
|
||||
// If you call init again, this will be reset to the default (false).
|
||||
|
||||
|
||||
STBRP_DEF void stbrp_setup_heuristic (stbrp_context *context, int heuristic);
|
||||
// Optionally select which packing heuristic the library should use. Different
|
||||
// heuristics will produce better/worse results for different data sets.
|
||||
// If you call init again, this will be reset to the default.
|
||||
|
||||
enum
|
||||
{
|
||||
STBRP_HEURISTIC_Skyline_default=0,
|
||||
STBRP_HEURISTIC_Skyline_BL_sortHeight = STBRP_HEURISTIC_Skyline_default,
|
||||
STBRP_HEURISTIC_Skyline_BF_sortHeight
|
||||
};
|
||||
|
||||
|
||||
//////////////////////////////////////////////////////////////////////////////
|
||||
//
|
||||
// the details of the following structures don't matter to you, but they must
|
||||
// be visible so you can handle the memory allocations for them
|
||||
|
||||
struct stbrp_node
|
||||
{
|
||||
stbrp_coord x,y;
|
||||
stbrp_node *next;
|
||||
};
|
||||
|
||||
struct stbrp_context
|
||||
{
|
||||
int width;
|
||||
int height;
|
||||
int align;
|
||||
int init_mode;
|
||||
int heuristic;
|
||||
int num_nodes;
|
||||
stbrp_node *active_head;
|
||||
stbrp_node *free_head;
|
||||
stbrp_node extra[2]; // we allocate two extra nodes so optimal user-node-count is 'width' not 'width+2'
|
||||
};
|
||||
|
||||
#ifdef __cplusplus
|
||||
}
|
||||
#endif
|
||||
|
||||
#endif
|
||||
|
||||
//////////////////////////////////////////////////////////////////////////////
|
||||
//
|
||||
// IMPLEMENTATION SECTION
|
||||
//
|
||||
|
||||
#ifdef STB_RECT_PACK_IMPLEMENTATION
|
||||
#ifndef STBRP_SORT
|
||||
#include <stdlib.h>
|
||||
#define STBRP_SORT qsort
|
||||
#endif
|
||||
|
||||
#ifndef STBRP_ASSERT
|
||||
#include <assert.h>
|
||||
#define STBRP_ASSERT assert
|
||||
#endif
|
||||
|
||||
#ifdef _MSC_VER
|
||||
#define STBRP__NOTUSED(v) (void)(v)
|
||||
#define STBRP__CDECL __cdecl
|
||||
#else
|
||||
#define STBRP__NOTUSED(v) (void)sizeof(v)
|
||||
#define STBRP__CDECL
|
||||
#endif
|
||||
|
||||
enum
|
||||
{
|
||||
STBRP__INIT_skyline = 1
|
||||
};
|
||||
|
||||
STBRP_DEF void stbrp_setup_heuristic(stbrp_context *context, int heuristic)
|
||||
{
|
||||
switch (context->init_mode) {
|
||||
case STBRP__INIT_skyline:
|
||||
STBRP_ASSERT(heuristic == STBRP_HEURISTIC_Skyline_BL_sortHeight || heuristic == STBRP_HEURISTIC_Skyline_BF_sortHeight);
|
||||
context->heuristic = heuristic;
|
||||
break;
|
||||
default:
|
||||
STBRP_ASSERT(0);
|
||||
}
|
||||
}
|
||||
|
||||
STBRP_DEF void stbrp_setup_allow_out_of_mem(stbrp_context *context, int allow_out_of_mem)
|
||||
{
|
||||
if (allow_out_of_mem)
|
||||
// if it's ok to run out of memory, then don't bother aligning them;
|
||||
// this gives better packing, but may fail due to OOM (even though
|
||||
// the rectangles easily fit). @TODO a smarter approach would be to only
|
||||
// quantize once we've hit OOM, then we could get rid of this parameter.
|
||||
context->align = 1;
|
||||
else {
|
||||
// if it's not ok to run out of memory, then quantize the widths
|
||||
// so that num_nodes is always enough nodes.
|
||||
//
|
||||
// I.e. num_nodes * align >= width
|
||||
// align >= width / num_nodes
|
||||
// align = ceil(width/num_nodes)
|
||||
|
||||
context->align = (context->width + context->num_nodes-1) / context->num_nodes;
|
||||
}
|
||||
}
|
||||
|
||||
STBRP_DEF void stbrp_init_target(stbrp_context *context, int width, int height, stbrp_node *nodes, int num_nodes)
|
||||
{
|
||||
int i;
|
||||
|
||||
for (i=0; i < num_nodes-1; ++i)
|
||||
nodes[i].next = &nodes[i+1];
|
||||
nodes[i].next = NULL;
|
||||
context->init_mode = STBRP__INIT_skyline;
|
||||
context->heuristic = STBRP_HEURISTIC_Skyline_default;
|
||||
context->free_head = &nodes[0];
|
||||
context->active_head = &context->extra[0];
|
||||
context->width = width;
|
||||
context->height = height;
|
||||
context->num_nodes = num_nodes;
|
||||
stbrp_setup_allow_out_of_mem(context, 0);
|
||||
|
||||
// node 0 is the full width, node 1 is the sentinel (lets us not store width explicitly)
|
||||
context->extra[0].x = 0;
|
||||
context->extra[0].y = 0;
|
||||
context->extra[0].next = &context->extra[1];
|
||||
context->extra[1].x = (stbrp_coord) width;
|
||||
context->extra[1].y = (1<<30);
|
||||
context->extra[1].next = NULL;
|
||||
}
|
||||
|
||||
// find minimum y position if it starts at x1
|
||||
static int stbrp__skyline_find_min_y(stbrp_context *c, stbrp_node *first, int x0, int width, int *pwaste)
|
||||
{
|
||||
stbrp_node *node = first;
|
||||
int x1 = x0 + width;
|
||||
int min_y, visited_width, waste_area;
|
||||
|
||||
STBRP__NOTUSED(c);
|
||||
|
||||
STBRP_ASSERT(first->x <= x0);
|
||||
|
||||
#if 0
|
||||
// skip in case we're past the node
|
||||
while (node->next->x <= x0)
|
||||
++node;
|
||||
#else
|
||||
STBRP_ASSERT(node->next->x > x0); // we ended up handling this in the caller for efficiency
|
||||
#endif
|
||||
|
||||
STBRP_ASSERT(node->x <= x0);
|
||||
|
||||
min_y = 0;
|
||||
waste_area = 0;
|
||||
visited_width = 0;
|
||||
while (node->x < x1) {
|
||||
if (node->y > min_y) {
|
||||
// raise min_y higher.
|
||||
// we've accounted for all waste up to min_y,
|
||||
// but we'll now add more waste for everything we've visited
|
||||
waste_area += visited_width * (node->y - min_y);
|
||||
min_y = node->y;
|
||||
// the first time through, visited_width might be reduced
|
||||
if (node->x < x0)
|
||||
visited_width += node->next->x - x0;
|
||||
else
|
||||
visited_width += node->next->x - node->x;
|
||||
} else {
|
||||
// add waste area
|
||||
int under_width = node->next->x - node->x;
|
||||
if (under_width + visited_width > width)
|
||||
under_width = width - visited_width;
|
||||
waste_area += under_width * (min_y - node->y);
|
||||
visited_width += under_width;
|
||||
}
|
||||
node = node->next;
|
||||
}
|
||||
|
||||
*pwaste = waste_area;
|
||||
return min_y;
|
||||
}
|
||||
|
||||
typedef struct
|
||||
{
|
||||
int x,y;
|
||||
stbrp_node **prev_link;
|
||||
} stbrp__findresult;
|
||||
|
||||
static stbrp__findresult stbrp__skyline_find_best_pos(stbrp_context *c, int width, int height)
|
||||
{
|
||||
int best_waste = (1<<30), best_x, best_y = (1 << 30);
|
||||
stbrp__findresult fr;
|
||||
stbrp_node **prev, *node, *tail, **best = NULL;
|
||||
|
||||
// align to multiple of c->align
|
||||
width = (width + c->align - 1);
|
||||
width -= width % c->align;
|
||||
STBRP_ASSERT(width % c->align == 0);
|
||||
|
||||
// if it can't possibly fit, bail immediately
|
||||
if (width > c->width || height > c->height) {
|
||||
fr.prev_link = NULL;
|
||||
fr.x = fr.y = 0;
|
||||
return fr;
|
||||
}
|
||||
|
||||
node = c->active_head;
|
||||
prev = &c->active_head;
|
||||
while (node->x + width <= c->width) {
|
||||
int y,waste;
|
||||
y = stbrp__skyline_find_min_y(c, node, node->x, width, &waste);
|
||||
if (c->heuristic == STBRP_HEURISTIC_Skyline_BL_sortHeight) { // actually just want to test BL
|
||||
// bottom left
|
||||
if (y < best_y) {
|
||||
best_y = y;
|
||||
best = prev;
|
||||
}
|
||||
} else {
|
||||
// best-fit
|
||||
if (y + height <= c->height) {
|
||||
// can only use it if it fits vertically
|
||||
if (y < best_y || (y == best_y && waste < best_waste)) {
|
||||
best_y = y;
|
||||
best_waste = waste;
|
||||
best = prev;
|
||||
}
|
||||
}
|
||||
}
|
||||
prev = &node->next;
|
||||
node = node->next;
|
||||
}
|
||||
|
||||
best_x = (best == NULL) ? 0 : (*best)->x;
|
||||
|
||||
// if doing best-fit (BF), we also have to try aligning right edge to each node position
|
||||
//
|
||||
// e.g, if fitting
|
||||
//
|
||||
// ____________________
|
||||
// |____________________|
|
||||
//
|
||||
// into
|
||||
//
|
||||
// | |
|
||||
// | ____________|
|
||||
// |____________|
|
||||
//
|
||||
// then right-aligned reduces waste, but bottom-left (BL) always chooses left-aligned
|
||||
//
|
||||
// This makes BF take about 2x the time
|
||||
|
||||
if (c->heuristic == STBRP_HEURISTIC_Skyline_BF_sortHeight) {
|
||||
tail = c->active_head;
|
||||
node = c->active_head;
|
||||
prev = &c->active_head;
|
||||
// find first node that's admissible
|
||||
while (tail->x < width)
|
||||
tail = tail->next;
|
||||
while (tail) {
|
||||
int xpos = tail->x - width;
|
||||
int y,waste;
|
||||
STBRP_ASSERT(xpos >= 0);
|
||||
// find the left position that matches this
|
||||
while (node->next->x <= xpos) {
|
||||
prev = &node->next;
|
||||
node = node->next;
|
||||
}
|
||||
STBRP_ASSERT(node->next->x > xpos && node->x <= xpos);
|
||||
y = stbrp__skyline_find_min_y(c, node, xpos, width, &waste);
|
||||
if (y + height <= c->height) {
|
||||
if (y <= best_y) {
|
||||
if (y < best_y || waste < best_waste || (waste==best_waste && xpos < best_x)) {
|
||||
best_x = xpos;
|
||||
//STBRP_ASSERT(y <= best_y); [DEAR IMGUI]
|
||||
best_y = y;
|
||||
best_waste = waste;
|
||||
best = prev;
|
||||
}
|
||||
}
|
||||
}
|
||||
tail = tail->next;
|
||||
}
|
||||
}
|
||||
|
||||
fr.prev_link = best;
|
||||
fr.x = best_x;
|
||||
fr.y = best_y;
|
||||
return fr;
|
||||
}
|
||||
|
||||
static stbrp__findresult stbrp__skyline_pack_rectangle(stbrp_context *context, int width, int height)
|
||||
{
|
||||
// find best position according to heuristic
|
||||
stbrp__findresult res = stbrp__skyline_find_best_pos(context, width, height);
|
||||
stbrp_node *node, *cur;
|
||||
|
||||
// bail if:
|
||||
// 1. it failed
|
||||
// 2. the best node doesn't fit (we don't always check this)
|
||||
// 3. we're out of memory
|
||||
if (res.prev_link == NULL || res.y + height > context->height || context->free_head == NULL) {
|
||||
res.prev_link = NULL;
|
||||
return res;
|
||||
}
|
||||
|
||||
// on success, create new node
|
||||
node = context->free_head;
|
||||
node->x = (stbrp_coord) res.x;
|
||||
node->y = (stbrp_coord) (res.y + height);
|
||||
|
||||
context->free_head = node->next;
|
||||
|
||||
// insert the new node into the right starting point, and
|
||||
// let 'cur' point to the remaining nodes needing to be
|
||||
// stitched back in
|
||||
|
||||
cur = *res.prev_link;
|
||||
if (cur->x < res.x) {
|
||||
// preserve the existing one, so start testing with the next one
|
||||
stbrp_node *next = cur->next;
|
||||
cur->next = node;
|
||||
cur = next;
|
||||
} else {
|
||||
*res.prev_link = node;
|
||||
}
|
||||
|
||||
// from here, traverse cur and free the nodes, until we get to one
|
||||
// that shouldn't be freed
|
||||
while (cur->next && cur->next->x <= res.x + width) {
|
||||
stbrp_node *next = cur->next;
|
||||
// move the current node to the free list
|
||||
cur->next = context->free_head;
|
||||
context->free_head = cur;
|
||||
cur = next;
|
||||
}
|
||||
|
||||
// stitch the list back in
|
||||
node->next = cur;
|
||||
|
||||
if (cur->x < res.x + width)
|
||||
cur->x = (stbrp_coord) (res.x + width);
|
||||
|
||||
#ifdef _DEBUG
|
||||
cur = context->active_head;
|
||||
while (cur->x < context->width) {
|
||||
STBRP_ASSERT(cur->x < cur->next->x);
|
||||
cur = cur->next;
|
||||
}
|
||||
STBRP_ASSERT(cur->next == NULL);
|
||||
|
||||
{
|
||||
int count=0;
|
||||
cur = context->active_head;
|
||||
while (cur) {
|
||||
cur = cur->next;
|
||||
++count;
|
||||
}
|
||||
cur = context->free_head;
|
||||
while (cur) {
|
||||
cur = cur->next;
|
||||
++count;
|
||||
}
|
||||
STBRP_ASSERT(count == context->num_nodes+2);
|
||||
}
|
||||
#endif
|
||||
|
||||
return res;
|
||||
}
|
||||
|
||||
static int STBRP__CDECL rect_height_compare(const void *a, const void *b)
|
||||
{
|
||||
const stbrp_rect *p = (const stbrp_rect *) a;
|
||||
const stbrp_rect *q = (const stbrp_rect *) b;
|
||||
if (p->h > q->h)
|
||||
return -1;
|
||||
if (p->h < q->h)
|
||||
return 1;
|
||||
return (p->w > q->w) ? -1 : (p->w < q->w);
|
||||
}
|
||||
|
||||
static int STBRP__CDECL rect_original_order(const void *a, const void *b)
|
||||
{
|
||||
const stbrp_rect *p = (const stbrp_rect *) a;
|
||||
const stbrp_rect *q = (const stbrp_rect *) b;
|
||||
return (p->was_packed < q->was_packed) ? -1 : (p->was_packed > q->was_packed);
|
||||
}
|
||||
|
||||
STBRP_DEF int stbrp_pack_rects(stbrp_context *context, stbrp_rect *rects, int num_rects)
|
||||
{
|
||||
int i, all_rects_packed = 1;
|
||||
|
||||
// we use the 'was_packed' field internally to allow sorting/unsorting
|
||||
for (i=0; i < num_rects; ++i) {
|
||||
rects[i].was_packed = i;
|
||||
}
|
||||
|
||||
// sort according to heuristic
|
||||
STBRP_SORT(rects, num_rects, sizeof(rects[0]), rect_height_compare);
|
||||
|
||||
for (i=0; i < num_rects; ++i) {
|
||||
if (rects[i].w == 0 || rects[i].h == 0) {
|
||||
rects[i].x = rects[i].y = 0; // empty rect needs no space
|
||||
} else {
|
||||
stbrp__findresult fr = stbrp__skyline_pack_rectangle(context, rects[i].w, rects[i].h);
|
||||
if (fr.prev_link) {
|
||||
rects[i].x = (stbrp_coord) fr.x;
|
||||
rects[i].y = (stbrp_coord) fr.y;
|
||||
} else {
|
||||
rects[i].x = rects[i].y = STBRP__MAXVAL;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// unsort
|
||||
STBRP_SORT(rects, num_rects, sizeof(rects[0]), rect_original_order);
|
||||
|
||||
// set was_packed flags and all_rects_packed status
|
||||
for (i=0; i < num_rects; ++i) {
|
||||
rects[i].was_packed = !(rects[i].x == STBRP__MAXVAL && rects[i].y == STBRP__MAXVAL);
|
||||
if (!rects[i].was_packed)
|
||||
all_rects_packed = 0;
|
||||
}
|
||||
|
||||
// return the all_rects_packed status
|
||||
return all_rects_packed;
|
||||
}
|
||||
#endif
|
||||
|
||||
/*
|
||||
------------------------------------------------------------------------------
|
||||
This software is available under 2 licenses -- choose whichever you prefer.
|
||||
------------------------------------------------------------------------------
|
||||
ALTERNATIVE A - MIT License
|
||||
Copyright (c) 2017 Sean Barrett
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy of
|
||||
this software and associated documentation files (the "Software"), to deal in
|
||||
the Software without restriction, including without limitation the rights to
|
||||
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
|
||||
of the Software, and to permit persons to whom the Software is furnished to do
|
||||
so, subject to the following conditions:
|
||||
The above copyright notice and this permission notice shall be included in all
|
||||
copies or substantial portions of the Software.
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
||||
SOFTWARE.
|
||||
------------------------------------------------------------------------------
|
||||
ALTERNATIVE B - Public Domain (www.unlicense.org)
|
||||
This is free and unencumbered software released into the public domain.
|
||||
Anyone is free to copy, modify, publish, use, compile, sell, or distribute this
|
||||
software, either in source code form or as a compiled binary, for any purpose,
|
||||
commercial or non-commercial, and by any means.
|
||||
In jurisdictions that recognize copyright laws, the author or authors of this
|
||||
software dedicate any and all copyright interest in the software to the public
|
||||
domain. We make this dedication for the benefit of the public at large and to
|
||||
the detriment of our heirs and successors. We intend this dedication to be an
|
||||
overt act of relinquishment in perpetuity of all present and future rights to
|
||||
this software under copyright law.
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||
AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
|
||||
ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
|
||||
WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
|
||||
------------------------------------------------------------------------------
|
||||
*/
|
||||
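// Usage sketch for the packer above (illustrative only; the 512x512 target and the three
// rectangle sizes are arbitrary). Per the header notes, 'nodes' is sized >= the target width
// so results are not quantized, and 'was_packed' is checked after packing.
static int Example_Pack_Some_Rects(void) {
    stbrp_context ctx;
    static stbrp_node nodes[512];                                             // num_nodes >= width
    stbrp_rect rects[3] = { { 0, 128, 64 }, { 1, 32, 32 }, { 2, 200, 90 } };  // id, w, h
    stbrp_init_target(&ctx, 512, 512, nodes, 512);
    int all_packed = stbrp_pack_rects(&ctx, rects, 3);
    // on success, rects[i].x / rects[i].y hold the packed positions and was_packed is non-zero
    return all_packed;
}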
1527
lib/third_party/dear-imgui/imstb_textedit.h
vendored
Normal file
File diff suppressed because it is too large
5085
lib/third_party/dear-imgui/imstb_truetype.h
vendored
Normal file
File diff suppressed because it is too large
28
lib_main.cpp
@ -2,31 +2,41 @@
|
||||
// translation unit.
|
||||
// lib_main.cpp can be treated as a single-header library and added to a project like that.
|
||||
|
||||
// #TODO: This is quite disorganized. There must be a better way to do this by moving the
|
||||
// #TODO: #Library This is quite disorganized. There must be a better way to do this by moving the
|
||||
// typedefs and procedures that require forward declaration to the top with a metaprogram.
|
||||
// [ ] Linux / MacOS Ports
|
||||
|
||||
#include "lib/meta_generated.h"
|
||||
#include "lib/Base/Base.h"
|
||||
#include "lib/Base/Allocator.h"
|
||||
#include "lib/Base/Array.h"
|
||||
#include "lib/Base/General_Purpose_Allocator.h"
|
||||
|
||||
#if OS_WINDOWS
|
||||
# include "lib/OS/OS_Win32.h"
|
||||
#endif
|
||||
|
||||
#include "lib/Base/Arena.h"
|
||||
#include "lib/Base/Arena_Array.h"
|
||||
|
||||
#include "lib/Base/String.h"
|
||||
#include "lib/Base/Logger.h"
|
||||
#include "lib/Base/String.cpp"
|
||||
#include "lib/Base/ErrorCodes.cpp"
|
||||
|
||||
#include "lib/Base/ErrorType.cpp"
|
||||
#include "lib/Base/Arena_Table.cpp"
|
||||
#include "lib/Base/Base_Thread_Context.h"
|
||||
#include "lib/Base/Expandable_Arena.h"
|
||||
|
||||
#include "lib/Base/Arena.cpp"
|
||||
|
||||
#include "lib/Base/Arena_Table.cpp"
|
||||
#include "lib/Base/Base_Thread_Context.cpp"
|
||||
#include "lib/Base/Logger.cpp"
|
||||
#include "lib/Base/Expandable_Arena.cpp"
|
||||
#include "lib/Base/Allocator.cpp"
|
||||
#include "lib/Base/General_Purpose_Allocator.cpp"
|
||||
#include "lib/Base/Basic.cpp"
|
||||
|
||||
// OS-Abstraction Layer
|
||||
#include "lib/Base/Threads.cpp"
|
||||
#include "lib/OS/Base_Entry_Point.cpp"
|
||||
|
||||
|
||||
#if OS_WINDOWS
|
||||
# include "lib/OS/OS_Win32.cpp"
|
||||
@ -37,7 +47,7 @@
|
||||
// #include "imgui-docking.cpp"
|
||||
|
||||
// #if OS_LINUX..
|
||||
// #include "src/OS_Linux.cpp" // #TODO: Future.
|
||||
// #include "src/OS_Linux.cpp"
|
||||
// #if OS_MACOS..
|
||||
// #include "src/OS_MacOS.cpp" // #TODO: Future.
|
||||
// #include "src/OS_MacOS.cpp"
|
||||
|
||||
|
||||
230
src/Base_Entry_Point.cpp
Normal file
@ -0,0 +1,230 @@
|
||||
internal void Main_Entry_Point (int argc, WCHAR **argv);
|
||||
|
||||
#if OS_WINDOWS
|
||||
#if BUILD_CONSOLE_INTERFACE
|
||||
int wmain(int argc, WCHAR **argv) {
|
||||
// debug_break();
|
||||
Bootstrap_Main_Thread_Context();
|
||||
// Win32_Entry_Point(argc, argv); // This might be the problem.
|
||||
Main_Entry_Point(argc, argv);
|
||||
return 0;
|
||||
}
|
||||
#else
|
||||
int wWinMain(HINSTANCE hInstance, HINSTANCE hPrevInstance, LPWSTR lpCmdLine, int nShowCmd) {
|
||||
Bootstrap_Main_Thread_Context();
|
||||
Win32_Entry_Point(__argc, __wargv);
|
||||
Main_Entry_Point(__argc, __wargv);
|
||||
return 0;
|
||||
}
|
||||
#endif
|
||||
#endif
|
||||
|
||||
#include <d3d11.h>
|
||||
#include <tchar.h>
|
||||
#include "ImGui_Supplementary.cpp"
|
||||
|
||||
void ImGui_Application () {
|
||||
// Make process DPI aware and obtain main monitor scale
|
||||
ImGui_ImplWin32_EnableDpiAwareness();
|
||||
f32 main_scale = ImGui_ImplWin32_GetDpiScaleForMonitor(::MonitorFromPoint(POINT{ 0, 0 }, MONITOR_DEFAULTTOPRIMARY));
|
||||
|
||||
// Create application window
|
||||
WNDCLASSEXW wc = { sizeof(wc), CS_CLASSDC, WndProc, 0L, 0L, GetModuleHandle(nullptr), nullptr, nullptr, nullptr, nullptr, L"ImGui Example", nullptr };
|
||||
::RegisterClassExW(&wc);
|
||||
HWND hwnd = ::CreateWindowW(wc.lpszClassName, L"Dear ImGui DirectX11 Example", WS_OVERLAPPEDWINDOW, 100, 100, (int)(1280 * main_scale), (int)(800 * main_scale), nullptr, nullptr, wc.hInstance, nullptr);
|
||||
|
||||
// Initialize Direct3D
|
||||
if (!CreateDeviceD3D(hwnd)) {
|
||||
CleanupDeviceD3D();
|
||||
::UnregisterClassW(wc.lpszClassName, wc.hInstance);
|
||||
return;
|
||||
}
|
||||
|
||||
// Show the window
|
||||
::ShowWindow(hwnd, SW_SHOWDEFAULT);
|
||||
::UpdateWindow(hwnd);
|
||||
|
||||
// Setup Dear ImGui context
|
||||
IMGUI_CHECKVERSION();
|
||||
printf("ImGui Version %s \n", ImGui::GetVersion());
|
||||
ImGui::CreateContext();
|
||||
ImGuiIO& io = ImGui::GetIO(); (void)io;
|
||||
io.ConfigFlags |= ImGuiConfigFlags_NavEnableKeyboard; // Enable Keyboard Controls
|
||||
io.ConfigFlags |= ImGuiConfigFlags_NavEnableGamepad; // Enable Gamepad Controls
|
||||
io.ConfigFlags |= ImGuiConfigFlags_DockingEnable; // Enable Docking
|
||||
io.ConfigFlags |= ImGuiConfigFlags_ViewportsEnable; // Enable Multi-Viewport / Platform Windows
|
||||
|
||||
//io.ConfigViewportsNoAutoMerge = true;
|
||||
//io.ConfigViewportsNoTaskBarIcon = true;
|
||||
//io.ConfigDockingAlwaysTabBar = true;
|
||||
//io.ConfigDockingTransparentPayload = true;
|
||||
|
||||
// Setup Dear ImGui style
|
||||
ImGui::StyleColorsDark();
|
||||
|
||||
// Setup scaling
|
||||
ImGuiStyle& style = ImGui::GetStyle();
|
||||
style.ScaleAllSizes(main_scale); // Bake a fixed style scale. (until we have a solution for dynamic style scaling, changing this requires resetting Style + calling this again)
|
||||
style.FontScaleDpi = main_scale; // Set initial font scale. (using io.ConfigDpiScaleFonts=true makes this unnecessary. We leave both here for documentation purpose)
|
||||
io.ConfigDpiScaleFonts = true; // [Experimental] Automatically overwrite style.FontScaleDpi in Begin() when Monitor DPI changes. This will scale fonts but _NOT_ scale sizes/padding for now.
|
||||
io.ConfigDpiScaleViewports = true; // [Experimental] Scale Dear ImGui and Platform Windows when Monitor DPI changes.
|
||||
|
||||
// When viewports are enabled we tweak WindowRounding/WindowBg so platform windows can look identical to regular ones.
|
||||
if (io.ConfigFlags & ImGuiConfigFlags_ViewportsEnable)
|
||||
{
|
||||
style.WindowRounding = 0.0f;
|
||||
style.Colors[ImGuiCol_WindowBg].w = 1.0f;
|
||||
}
|
||||
|
||||
// Setup Platform/Renderer backends
|
||||
ImGui_ImplWin32_Init(hwnd);
|
||||
ImGui_ImplDX11_Init(g_pd3dDevice, g_pd3dDeviceContext);
|
||||
|
||||
// #TODO: #ImGUI - Load fonts:
|
||||
// - If no fonts are loaded, dear imgui will use the default font. You can also load multiple fonts and use ImGui::PushFont()/PopFont() to select them.
|
||||
// - AddFontFromFileTTF() will return the ImFont* so you can store it if you need to select the font among multiple.
|
||||
// - If the file cannot be loaded, the function will return a nullptr. Please handle those errors in your application (e.g. use an assertion, or display an error and quit).
|
||||
// - Use '#define IMGUI_ENABLE_FREETYPE' in your imconfig file to use Freetype for higher quality font rendering.
|
||||
// - Read 'docs/FONTS.md' for more instructions and details. If you like the default font but want it to scale better, consider using the 'ProggyVector' from the same author!
|
||||
// - Remember that in C/C++ if you want to include a backslash \ in a string literal you need to write a double backslash \\ !
|
||||
//style.FontSizeBase = 20.0f;
|
||||
//io.Fonts->AddFontDefault();
|
||||
//io.Fonts->AddFontFromFileTTF("c:\\Windows\\Fonts\\segoeui.ttf");
|
||||
//io.Fonts->AddFontFromFileTTF("../../misc/fonts/DroidSans.ttf");
|
||||
//io.Fonts->AddFontFromFileTTF("../../misc/fonts/Roboto-Medium.ttf");
|
||||
//io.Fonts->AddFontFromFileTTF("../../misc/fonts/Cousine-Regular.ttf");
|
||||
//ImFont* font = io.Fonts->AddFontFromFileTTF("c:\\Windows\\Fonts\\ArialUni.ttf");
|
||||
//IM_ASSERT(font != nullptr);
|
||||
|
||||
bool show_demo_window = true;
|
||||
bool show_another_window = false;
|
||||
ImVec4 clear_color = ImVec4(0.45f, 0.55f, 0.60f, 1.00f);
|
||||
|
||||
// Main loop
|
||||
bool done = false;
|
||||
|
||||
while (!done) {
|
||||
// Poll and handle messages (inputs, window resize, etc.)
|
||||
// See the WndProc() function below for how we dispatch events to the Win32 backend.
|
||||
MSG msg;
|
||||
while (::PeekMessage(&msg, nullptr, 0U, 0U, PM_REMOVE)) {
|
||||
::TranslateMessage(&msg);
|
||||
::DispatchMessage(&msg);
|
||||
if (msg.message == WM_QUIT)
|
||||
done = true;
|
||||
}
|
||||
if (done)
|
||||
break;
|
||||
|
||||
// Handle window being minimized or screen locked
|
||||
if (g_SwapChainOccluded && g_pSwapChain->Present(0, DXGI_PRESENT_TEST) == DXGI_STATUS_OCCLUDED) {
|
||||
::Sleep(10);
|
||||
continue;
|
||||
}
|
||||
g_SwapChainOccluded = false;
|
||||
|
||||
// Handle window resize (we don't resize directly in the WM_SIZE handler)
|
||||
if (g_ResizeWidth != 0 && g_ResizeHeight != 0) {
|
||||
CleanupRenderTarget();
|
||||
g_pSwapChain->ResizeBuffers(0, g_ResizeWidth, g_ResizeHeight, DXGI_FORMAT_UNKNOWN, 0);
|
||||
g_ResizeWidth = g_ResizeHeight = 0;
|
||||
CreateRenderTarget();
|
||||
}
|
||||
|
||||
// Start the Dear ImGui frame
|
||||
ImGui_ImplDX11_NewFrame();
|
||||
ImGui_ImplWin32_NewFrame();
|
||||
ImGui::NewFrame();
|
||||
|
||||
// Simple dockspace:
|
||||
ImGui::DockSpaceOverViewport();
|
||||
|
||||
{
|
||||
ImGui::Begin("Hello, world!");
|
||||
|
||||
if (ImGui::Button("Create New Window")) {
|
||||
// I think that create_window should take a few parameters, and we have other APIs for
|
||||
// styling, positioning, etc. I just call this and want to get a window.
|
||||
// auto new_window = create_window(window_name);
|
||||
}
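// A purely hypothetical sketch of that API (none of these names exist in the repo yet):
//     Window* new_window = create_window("My Window");   // returns a handle only
//     set_window_position(new_window, 100, 100);         // styling/positioning via separate calls
//     set_window_size(new_window, 640, 480);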
|
||||
|
||||
if (ImGui::Button("Position recently created Window")) {
|
||||
|
||||
}
|
||||
|
||||
ImGui::End();
|
||||
}
|
||||
|
||||
|
||||
// Rendering
|
||||
ImGui::Render();
|
||||
const f32 clear_color_with_alpha[4] = { clear_color.x * clear_color.w, clear_color.y * clear_color.w, clear_color.z * clear_color.w, clear_color.w };
|
||||
g_pd3dDeviceContext->OMSetRenderTargets(1, &g_mainRenderTargetView, nullptr);
|
||||
g_pd3dDeviceContext->ClearRenderTargetView(g_mainRenderTargetView, clear_color_with_alpha);
|
||||
ImGui_ImplDX11_RenderDrawData(ImGui::GetDrawData());
|
||||
|
||||
// Update and Render additional Platform Windows
|
||||
if (io.ConfigFlags & ImGuiConfigFlags_ViewportsEnable)
|
||||
{
|
||||
ImGui::UpdatePlatformWindows();
|
||||
ImGui::RenderPlatformWindowsDefault();
|
||||
}
|
||||
|
||||
// Present
|
||||
HRESULT hr = g_pSwapChain->Present(1, 0); // Present with vsync
|
||||
// HRESULT hr = g_pSwapChain->Present(0, 0); // Present without vsync
|
||||
g_SwapChainOccluded = (hr == DXGI_STATUS_OCCLUDED);
|
||||
} // while (!done)
|
||||
|
||||
// Cleanup
|
||||
ImGui_ImplDX11_Shutdown();
|
||||
ImGui_ImplWin32_Shutdown();
|
||||
ImGui::DestroyContext();
|
||||
|
||||
CleanupDeviceD3D();
|
||||
::DestroyWindow(hwnd);
|
||||
::UnregisterClassW(wc.lpszClassName, wc.hInstance);
|
||||
}
|
||||
|
||||
string string_literal_example = "Hello, I am a string literal.";
|
||||
|
||||
internal void Main_Entry_Point (int argc, WCHAR **argv) {
|
||||
// See: main_thread_base_entry_point
|
||||
temp_reset();
|
||||
push_allocator(get_temp_allocator());
|
||||
// tip: use auto_reset or auto_release with `get_thread_context()->arena`
|
||||
|
||||
// debug_break();
|
||||
|
||||
// String builder example:
|
||||
// OK. I can work with this.
|
||||
|
||||
auto sb = new_string_builder(Arena_Reserve::Size_64K);
|
||||
append(sb, "string_literal_example");
|
||||
append(sb, " ");
|
||||
print_to_builder(sb, "There are %d cats in the %s", 64, "house.\n");
|
||||
append(sb, " > ");
|
||||
print_to_builder(sb, "some size_t: %u", (u64)3982739867);
|
||||
append(sb, "\n");
|
||||
print_to_builder(sb, "the literal: %s", string_literal_example.data);
|
||||
|
||||
// string result = string_view(sb);
|
||||
string result = builder_to_string(sb); // also frees
|
||||
// print((char*)result.data);
|
||||
log("Hello.");
|
||||
log("log() should automatically append newlines to these prints.");
|
||||
log("If it doesn't, I will be very upset.");
|
||||
log(result);
|
||||
log("Hello. There are %s things here\n", string_literal_example.data);
|
||||
|
||||
print("Hello, I am just a printed message to stdout\n\n");
|
||||
|
||||
// free_string_builder(sb);
|
||||
|
||||
// ImGui_Application();
|
||||
|
||||
// #TODO: #Main - `Main_Entry_Point`
|
||||
// [ ] Setup Mouse and Keyboard Inputs
|
||||
// [ ] Launch second thread
|
||||
}
|
||||
|
||||
106
src/ImGui_Supplementary.cpp
Normal file
@ -0,0 +1,106 @@
|
||||
// Data
|
||||
static ID3D11Device* g_pd3dDevice = nullptr;
|
||||
static ID3D11DeviceContext* g_pd3dDeviceContext = nullptr;
|
||||
static IDXGISwapChain* g_pSwapChain = nullptr;
|
||||
static bool g_SwapChainOccluded = false;
|
||||
static UINT g_ResizeWidth = 0, g_ResizeHeight = 0;
|
||||
static ID3D11RenderTargetView* g_mainRenderTargetView = nullptr;
|
||||
|
||||
// Forward declarations of helper functions
|
||||
bool CreateDeviceD3D(HWND hWnd);
|
||||
void CleanupDeviceD3D();
|
||||
void CreateRenderTarget();
|
||||
void CleanupRenderTarget();
|
||||
LRESULT WINAPI WndProc(HWND hWnd, UINT msg, WPARAM wParam, LPARAM lParam);
|
||||
|
||||
// Helper functions
|
||||
bool CreateDeviceD3D(HWND hWnd) {
|
||||
// Setup swap chain
|
||||
// This is a basic setup. Optimally could use e.g. DXGI_SWAP_EFFECT_FLIP_DISCARD and handle fullscreen mode differently. See #8979 for suggestions.
|
||||
DXGI_SWAP_CHAIN_DESC sd;
|
||||
ZeroMemory(&sd, sizeof(sd));
|
||||
sd.BufferCount = 2;
|
||||
sd.BufferDesc.Width = 0;
|
||||
sd.BufferDesc.Height = 0;
|
||||
sd.BufferDesc.Format = DXGI_FORMAT_R8G8B8A8_UNORM;
|
||||
sd.BufferDesc.RefreshRate.Numerator = 60;
|
||||
sd.BufferDesc.RefreshRate.Denominator = 1;
|
||||
sd.Flags = DXGI_SWAP_CHAIN_FLAG_ALLOW_MODE_SWITCH;
|
||||
sd.BufferUsage = DXGI_USAGE_RENDER_TARGET_OUTPUT;
|
||||
sd.OutputWindow = hWnd;
|
||||
sd.SampleDesc.Count = 1;
|
||||
sd.SampleDesc.Quality = 0;
|
||||
sd.Windowed = TRUE;
|
||||
sd.SwapEffect = DXGI_SWAP_EFFECT_DISCARD;
|
||||
|
||||
UINT createDeviceFlags = 0;
|
||||
//createDeviceFlags |= D3D11_CREATE_DEVICE_DEBUG;
|
||||
D3D_FEATURE_LEVEL featureLevel;
|
||||
const D3D_FEATURE_LEVEL featureLevelArray[2] = { D3D_FEATURE_LEVEL_11_0, D3D_FEATURE_LEVEL_10_0, };
|
||||
HRESULT res = D3D11CreateDeviceAndSwapChain(nullptr, D3D_DRIVER_TYPE_HARDWARE, nullptr, createDeviceFlags, featureLevelArray, 2, D3D11_SDK_VERSION, &sd, &g_pSwapChain, &g_pd3dDevice, &featureLevel, &g_pd3dDeviceContext);
|
||||
if (res == DXGI_ERROR_UNSUPPORTED) // Try high-performance WARP software driver if hardware is not available.
|
||||
res = D3D11CreateDeviceAndSwapChain(nullptr, D3D_DRIVER_TYPE_WARP, nullptr, createDeviceFlags, featureLevelArray, 2, D3D11_SDK_VERSION, &sd, &g_pSwapChain, &g_pd3dDevice, &featureLevel, &g_pd3dDeviceContext);
|
||||
if (res != S_OK)
|
||||
return false;
|
||||
|
||||
// Disable DXGI's default Alt+Enter fullscreen behavior.
|
||||
// - You are free to leave this enabled, but it will not work properly with multiple viewports.
|
||||
// - This must be done for all windows associated to the device. Our DX11 backend does this automatically for secondary viewports that it creates.
|
||||
IDXGIFactory* pSwapChainFactory;
|
||||
if (SUCCEEDED(g_pSwapChain->GetParent(IID_PPV_ARGS(&pSwapChainFactory)))) {
|
||||
pSwapChainFactory->MakeWindowAssociation(hWnd, DXGI_MWA_NO_ALT_ENTER);
|
||||
pSwapChainFactory->Release();
|
||||
}
|
||||
|
||||
CreateRenderTarget();
|
||||
return true;
|
||||
}
|
||||
|
||||
void CleanupDeviceD3D() {
|
||||
CleanupRenderTarget();
|
||||
if (g_pSwapChain) { g_pSwapChain->Release(); g_pSwapChain = nullptr; }
|
||||
if (g_pd3dDeviceContext) { g_pd3dDeviceContext->Release(); g_pd3dDeviceContext = nullptr; }
|
||||
if (g_pd3dDevice) { g_pd3dDevice->Release(); g_pd3dDevice = nullptr; }
|
||||
}
|
||||
|
||||
void CreateRenderTarget() {
|
||||
ID3D11Texture2D* pBackBuffer;
|
||||
g_pSwapChain->GetBuffer(0, IID_PPV_ARGS(&pBackBuffer));
|
||||
g_pd3dDevice->CreateRenderTargetView(pBackBuffer, nullptr, &g_mainRenderTargetView);
|
||||
pBackBuffer->Release();
|
||||
}
|
||||
|
||||
void CleanupRenderTarget() {
|
||||
if (g_mainRenderTargetView) { g_mainRenderTargetView->Release(); g_mainRenderTargetView = nullptr; }
|
||||
}
|
||||
|
||||
// Forward declare message handler from imgui_impl_win32.cpp
|
||||
extern IMGUI_IMPL_API LRESULT ImGui_ImplWin32_WndProcHandler(HWND hWnd, UINT msg, WPARAM wParam, LPARAM lParam);
|
||||
|
||||
// Win32 message handler
|
||||
// You can read the io.WantCaptureMouse, io.WantCaptureKeyboard flags to tell if dear imgui wants to use your inputs.
|
||||
// - When io.WantCaptureMouse is true, do not dispatch mouse input data to your main application, or clear/overwrite your copy of the mouse data.
|
||||
// - When io.WantCaptureKeyboard is true, do not dispatch keyboard input data to your main application, or clear/overwrite your copy of the keyboard data.
|
||||
// Generally you may always pass all inputs to dear imgui, and hide them from your application based on those two flags.
|
||||
LRESULT WINAPI WndProc(HWND hWnd, UINT msg, WPARAM wParam, LPARAM lParam) {
|
||||
if (ImGui_ImplWin32_WndProcHandler(hWnd, msg, wParam, lParam))
|
||||
return true;
|
||||
|
||||
switch (msg) {
|
||||
case WM_SIZE:
|
||||
if (wParam == SIZE_MINIMIZED)
|
||||
return 0;
|
||||
g_ResizeWidth = (UINT)LOWORD(lParam); // Queue resize
|
||||
g_ResizeHeight = (UINT)HIWORD(lParam);
|
||||
|
||||
return 0;
|
||||
case WM_SYSCOMMAND:
|
||||
if ((wParam & 0xfff0) == SC_KEYMENU) // Disable ALT application menu
|
||||
return 0;
|
||||
break;
|
||||
case WM_DESTROY:
|
||||
::PostQuitMessage(0);
|
||||
return 0;
|
||||
}
|
||||
return ::DefWindowProcW(hWnd, msg, wParam, lParam);
|
||||
}
|
||||
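// Sketch of how an application that also reads raw Win32 input could honor the capture flags
// described above. App_Handle_Mouse_Event / App_Handle_Key_Event are hypothetical placeholders,
// not functions defined anywhere in this repository.
static void Example_Forward_Raw_Input(HWND hWnd, UINT msg, WPARAM wParam, LPARAM lParam) {
    ImGuiIO& io = ImGui::GetIO();
    if (!io.WantCaptureMouse && msg >= WM_MOUSEFIRST && msg <= WM_MOUSELAST) {
        // App_Handle_Mouse_Event(hWnd, msg, wParam, lParam);   // hypothetical
    }
    if (!io.WantCaptureKeyboard && (msg == WM_KEYDOWN || msg == WM_KEYUP)) {
        // App_Handle_Key_Event(hWnd, msg, wParam, lParam);     // hypothetical
    }
}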