Initial commit - still needs some work and some documentation.

This commit is contained in:
Musa Mahmood 2026-01-19 15:15:36 -05:00
commit fd8ceeb7c9
72 changed files with 87606 additions and 0 deletions

366
.gitignore vendored Normal file
View File

@ -0,0 +1,366 @@
## Ignore Visual Studio temporary files, build results, and
## files generated by popular Visual Studio add-ons.
##
## Get latest from https://github.com/github/gitignore/blob/master/VisualStudio.gitignore
.stfolder/
# User-specific files
*.suo
*.user
*.userosscache
*.sln.docstates
# User-specific files (MonoDevelop/Xamarin Studio)
*.userprefs
# Build results
[Dd]ebug/
[Dd]ebugPublic/
[Rr]elease/
[Rr]eleases/
x64/
x86/
bld/
[Bb]in/
[Oo]bj/
[Ll]og/
# Visual Studio 2015/2017 cache/options directory
.vs/
# Uncomment if you have tasks that create the project's static files in wwwroot
#wwwroot/
# Visual Studio 2017 auto generated files
Generated\ Files/
# MSTest test Results
[Tt]est[Rr]esult*/
[Bb]uild[Ll]og.*
# NUNIT
*.VisualState.xml
TestResult.xml
# Build Results of an ATL Project
[Dd]ebugPS/
[Rr]eleasePS/
dlldata.c
# Benchmark Results
BenchmarkDotNet.Artifacts/
# .NET Core
project.lock.json
project.fragment.lock.json
artifacts/
# StyleCop
StyleCopReport.xml
# Files built by Visual Studio
*_i.c
*_p.c
*_i.h
*.ilk
*.meta
*.obj
*.iobj
*.pch
*.pdb
*.ipdb
*.pgc
*.pgd
*.rsp
*.sbr
*.tlb
*.tli
*.tlh
*.tmp
*.tmp_proj
*.log
*.vspscc
*.vssscc
.builds
*.pidb
*.svclog
*.scc
# Chutzpah Test files
_Chutzpah*
# Visual C++ cache files
ipch/
*.aps
*.ncb
*.opendb
*.opensdf
*.sdf
*.cachefile
*.VC.db
*.VC.VC.opendb
# Visual Studio profiler
*.psess
*.vsp
*.vspx
*.sap
# Visual Studio Trace Files
*.e2e
# TFS 2012 Local Workspace
$tf/
# Guidance Automation Toolkit
*.gpState
# ReSharper is a .NET coding add-in
_ReSharper*/
*.[Rr]e[Ss]harper
*.DotSettings.user
# JustCode is a .NET coding add-in
.JustCode
# TeamCity is a build add-in
_TeamCity*
# DotCover is a Code Coverage Tool
*.dotCover
# AxoCover is a Code Coverage Tool
.axoCover/*
!.axoCover/settings.json
# Visual Studio code coverage results
*.coverage
*.coveragexml
# NCrunch
_NCrunch_*
.*crunch*.local.xml
nCrunchTemp_*
# MightyMoose
*.mm.*
AutoTest.Net/
# Web workbench (sass)
.sass-cache/
# Installshield output folder
[Ee]xpress/
# DocProject is a documentation generator add-in
DocProject/buildhelp/
DocProject/Help/*.HxT
DocProject/Help/*.HxC
DocProject/Help/*.hhc
DocProject/Help/*.hhk
DocProject/Help/*.hhp
DocProject/Help/Html2
DocProject/Help/html
# Click-Once directory
publish/
# Publish Web Output
*.[Pp]ublish.xml
*.azurePubxml
# Note: Comment the next line if you want to checkin your web deploy settings,
# but database connection strings (with potential passwords) will be unencrypted
*.pubxml
*.publishproj
# Microsoft Azure Web App publish settings. Comment the next line if you want to
# checkin your Azure Web App publish settings, but sensitive information contained
# in these scripts will be unencrypted
PublishScripts/
# NuGet Packages
*.nupkg
# The packages folder can be ignored because of Package Restore
**/[Pp]ackages/*
# except build/, which is used as an MSBuild target.
!**/[Pp]ackages/build/
# Uncomment if necessary however generally it will be regenerated when needed
#!**/[Pp]ackages/repositories.config
# NuGet v3's project.json files produces more ignorable files
*.nuget.props
*.nuget.targets
# Microsoft Azure Build Output
csx/
*.build.csdef
# Microsoft Azure Emulator
ecf/
rcf/
# Windows Store app package directories and files
AppPackages/
BundleArtifacts/
Package.StoreAssociation.xml
_pkginfo.txt
*.appx
# Visual Studio cache files
# files ending in .cache can be ignored
*.[Cc]ache
# but keep track of directories ending in .cache
!*.[Cc]ache/
# Others
ClientBin/
~$*
*~
*.dbmdl
*.dbproj.schemaview
*.jfm
*.pfx
*.publishsettings
orleans.codegen.cs
# Including strong name files can present a security risk
# (https://github.com/github/gitignore/pull/2483#issue-259490424)
#*.snk
# Since there are multiple workflows, uncomment next line to ignore bower_components
# (https://github.com/github/gitignore/pull/1529#issuecomment-104372622)
#bower_components/
# RIA/Silverlight projects
Generated_Code/
# Backup & report files from converting an old project file
# to a newer Visual Studio version. Backup files are not needed,
# because we have git ;-)
_UpgradeReport_Files/
Backup*/
UpgradeLog*.XML
UpgradeLog*.htm
ServiceFabricBackup/
*.rptproj.bak
# SQL Server files
*.mdf
*.ldf
*.ndf
# Business Intelligence projects
*.rdl.data
*.bim.layout
*.bim_*.settings
*.rptproj.rsuser
# Microsoft Fakes
FakesAssemblies/
# GhostDoc plugin setting file
*.GhostDoc.xml
# Node.js Tools for Visual Studio
.ntvs_analysis.dat
node_modules/
# Visual Studio 6 build log
*.plg
# Visual Studio 6 workspace options file
*.opt
# Visual Studio 6 auto-generated workspace file (contains which files were open etc.)
*.vbw
# Visual Studio LightSwitch build output
**/*.HTMLClient/GeneratedArtifacts
**/*.DesktopClient/GeneratedArtifacts
**/*.DesktopClient/ModelManifest.xml
**/*.Server/GeneratedArtifacts
**/*.Server/ModelManifest.xml
_Pvt_Extensions
# Paket dependency manager
.paket/paket.exe
paket-files/
# FAKE - F# Make
.fake/
# JetBrains Rider
.idea/
*.sln.iml
# CodeRush
.cr/
# Python Tools for Visual Studio (PTVS)
__pycache__/
*.pyc
*.egg-info
*.egg
# Cake - Uncomment if you are using it
# tools/**
# !tools/packages.config
# Tabs Studio
*.tss
# Telerik's JustMock configuration file
*.jmconfig
# BizTalk build output
*.btp.cs
*.btm.cs
*.odx.cs
*.xsd.cs
# OpenCover UI analysis results
OpenCover/
# Azure Stream Analytics local run output
ASALocalRun/
# MSBuild Binary and Structured Log
*.binlog
# NVidia Nsight GPU debugger configuration file
*.nvuser
# MFractors (Xamarin productivity tool) working folder
.mfractor/
*.exe
*.raw
*.txt
*.csv
*.pickle
*.onnx
#Environment file
.env
.vscode/
installed*
compiled/
python/flowcat.egg-info/
.Rproj.user
# Docs build files
docs/_build/*
docs/Doxyfile*
# CMake & GNU Make
CMakeCache.txt
CMakeFiles/
Makefile
# Libraries
*.so
*.a
CMakeSettings.json
._.gitignore
build/

57
CMakeLists.txt Normal file
View File

@ -0,0 +1,57 @@
############# README ############# README ############# README ############
# Option to choose between shared or static library
# Pass this as an argument as follows when configuring the project:
# `cmake -S . -B build`
# then build it with either `Release` or `Debug` option:
# `cmake --build build --config Release`
############ /README ############ /README ############ /README ############
cmake_minimum_required(VERSION 3.20)
project(musa-explorer-cpp)
# Use C++11 (REQUIRED so configuration fails instead of silently decaying)
SET (CMAKE_CXX_STANDARD 11)
SET (CMAKE_CXX_STANDARD_REQUIRED ON)
SET (CMAKE_VERBOSE_MAKEFILE ON)
if (MSVC)
    # Suppress warning: C++ exception handler used, but unwind semantics are not enabled.
    add_compile_options(/wd4530)
    #add_compile_options(/MD)
endif()
SET (EXE_NAME "mexplore_v2")
SET (SRC_FILES
    lib/third_party/dear-imgui/imgui.cpp
    lib/third_party/dear-imgui/imgui_widgets.cpp
    lib/third_party/dear-imgui/imgui_draw.cpp
    lib/third_party/dear-imgui/imgui_tables.cpp
    lib/third_party/dear-imgui/imgui_impl_dx11.cpp
    lib/third_party/dear-imgui/imgui_impl_win32.cpp
    exe_main.cpp
)
SET (INCLUDE_DIRS
    ${PROJECT_SOURCE_DIR}/src
    ${PROJECT_SOURCE_DIR}/lib
    ${PROJECT_SOURCE_DIR}/lib/third_party
)
SET (LIBRARY_PATHS
    ${PROJECT_SOURCE_DIR}/lib/third_party/Everything/windows/Everything64.lib
)
add_executable(${EXE_NAME} ${SRC_FILES})
target_include_directories(${EXE_NAME} PRIVATE ${INCLUDE_DIRS})
target_link_libraries(${EXE_NAME} PRIVATE ${LIBRARY_PATHS})
# #FIX: message() runs at configure time and does NOT expand generator
# expressions, so the old `message(STATUS "Build type: $<CONFIG>")` printed
# the literal text "$<CONFIG>". With a multi-config generator (MSVC) the
# configuration is only chosen at build time via --config.
message(STATUS "Configured ${EXE_NAME}; select the configuration at build time with --config Debug|Release")
add_custom_command(TARGET ${EXE_NAME} POST_BUILD
    WORKING_DIRECTORY ${CMAKE_SOURCE_DIR}
    COMMAND ${CMAKE_COMMAND} -E echo "Running post-build script..."
    # Unlike message(), custom commands DO evaluate $<CONFIG> at build time.
    COMMAND cmd.exe /c "${CMAKE_SOURCE_DIR}/copy_files.cmd" $<CONFIG>
    COMMENT "Running custom post-build script."
)

41
README.md Normal file
View File

@ -0,0 +1,41 @@
# Musa C++ Standard Template Library
## Project Overview
- This project uses CMake as a demonstration, but it's not really necessary, since the project is so simple.
- Currently, only Windows is supported for the main application and OS base layer.
- All the container classes should be supported on all platforms.
### [TODOs]
- [ ] Replace (TODO: this item is incomplete — specify what is to be replaced)
- [ ] Plans to use a metaprogram to modify code for certain reasons (e.g. generating header files, generating stack traces in debug mode)
- [ ] Add Linux Support
- [ ] Add Android Support
- [ ] Add MacOS Support
- [ ] Add iOS Support
[ ] Add UI framework
## Configuration
This project uses CMake to configure and build. Currently only Win32 is supported.
```
cmake -G "Visual Studio 17 2022" -A x64 -S . -B build
```
## Build
To build in debug mode:
```
cmake --build build --config Debug
```
To build in release mode:
```
cmake --build build --config Release
```
# APIs
## Base Layer
## Thread-local Context
- Also explain thread creation.
## OS Platform Layer
## Debug Tooling

17
copy_files.cmd Normal file
View File

@ -0,0 +1,17 @@
@echo off
rem Post-build helper invoked by CMake (see add_custom_command in CMakeLists.txt).
rem %1 is the build configuration name ("Debug" or "Release"); any other value
rem results in no files being copied.
set CONFIG=%1
echo Build configuration is: %CONFIG%
rem /i = case-insensitive compare; copy /Y = overwrite without prompting.
if /i "%CONFIG%"=="Debug" (
    echo Copying files to debug directory
    copy /Y "extras\fonts\RobotoMono-Regular.ttf" "build\Debug\RobotoMono-Regular.ttf"
    copy /Y "extras\icons\tmp.ico" "build\Debug\tmp.ico"
    copy /Y "extras\icons\tmp_min.ico" "build\Debug\tmp_min.ico"
    copy /Y "lib\third_party\Everything\windows\Everything64.dll" "build\Debug\Everything64.dll"
) else if /i "%CONFIG%"=="Release" (
    echo Copying files to release directory
    copy /Y "extras\fonts\RobotoMono-Regular.ttf" "build\Release\RobotoMono-Regular.ttf"
    copy /Y "extras\icons\tmp.ico" "build\Release\tmp.ico"
    copy /Y "extras\icons\tmp_min.ico" "build\Release\tmp_min.ico"
    copy /Y "lib\third_party\Everything\windows\Everything64.dll" "build\Release\Everything64.dll"
)

31
exe_main.cpp Normal file
View File

@ -0,0 +1,31 @@
// Treat library files as a single-file header (single translation unit)
#include "lib_main.cpp"
#define BASE_RUN_TESTS 1
#define BUILD_WIN32_APPLICATION 1
#if BASE_RUN_TESTS
#include "lib/Base/run_tests.cpp"
#endif
#if BUILD_WIN32_APPLICATION
// #FIX: static_assert with no message is C++17; this project targets C++11
// (see CMAKE_CXX_STANDARD in CMakeLists.txt), so supply the message argument.
static_assert(OS_WINDOWS && COMPILER_MSVC, "the Win32 application build requires Windows and MSVC");
#include <math.h>
#include <d3d11.h>
#include <dwmapi.h>
#pragma comment(lib, "d3d11.lib")
#pragma comment(lib, "dxgi.lib")
#pragma comment(lib, "msvcrt.lib")
#include "lib/third_party/dear-imgui/imgui.h"
#include "lib/third_party/dear-imgui/imgui_impl_win32.h"
#include "lib/third_party/dear-imgui/imgui_impl_dx11.h"
#include "src/ImGui_Supplementary.cpp"
#include "src/DX11.cpp"
#include "src/app_main.cpp"
#endif
// This is the #entry point
#include "src/Base_Entry_Point.cpp"

Binary file not shown.

BIN
extras/icons/tmp.ico Normal file

Binary file not shown.

After

Width:  |  Height:  |  Size: 150 KiB

BIN
extras/icons/tmp_min.ico Normal file

Binary file not shown.

After

Width:  |  Height:  |  Size: 15 KiB

93
extras/imgui.ini Normal file
View File

@ -0,0 +1,93 @@
[Window][Debug##Default]
Pos=0,0
Size=48,48
Collapsed=0
[Window][Hello, world!]
Size=1582,874
Collapsed=0
DockId=0x00000007,0
[Window][Dear ImGui Demo]
Pos=0,22
Size=2124,1511
Collapsed=0
DockId=0xC0DFADC4,0
[Window][DockSpace Demo]
Size=2560,1533
Collapsed=0
[Window][Dear ImGui Metrics/Debugger]
ViewportPos=1947,173
ViewportId=0x366E23FF
Size=435,462
Collapsed=0
[Window][WindowOverViewport_11111111]
Pos=0,0
Size=4529,1672
Collapsed=0
[Window][Font Settings]
Pos=3349,0
Size=1180,123
Collapsed=0
DockId=0x00000001,0
[Window][Test panel]
Pos=192,318
Size=691,540
Collapsed=0
[Window][Debug Panel]
Pos=3349,126
Size=1180,853
Collapsed=0
DockId=0x00000002,0
[Window][Control Panel]
Pos=3349,982
Size=1180,690
Collapsed=0
DockId=0x00000006,0
[Window][Enumerated Data Workspace]
Size=3346,1672
Collapsed=0
DockId=0x00000007,0
[Window][Main UI]
Size=3346,1672
Collapsed=0
DockId=0x00000007,0
[Window][Explorer Search]
Size=3346,1672
Collapsed=0
DockId=0x00000007,0
[Window][Explorer Search##Ex1]
Pos=0,0
Size=3346,1443
Collapsed=0
DockId=0x00000007,0
[Window][Executables##Ex1]
Pos=0,1446
Size=3346,226
Collapsed=0
DockId=0x00000008,0
[Docking][Data]
DockSpace ID=0x08BD597D Window=0x1BBC0F80 Pos=296,261 Size=4529,1672 Split=X Selected=0x1FC7AC8C
DockNode ID=0x00000003 Parent=0x08BD597D SizeRef=1373,1672 Split=Y Selected=0x18409996
DockNode ID=0x00000007 Parent=0x00000003 SizeRef=3346,1443 CentralNode=1 Selected=0x18409996
DockNode ID=0x00000008 Parent=0x00000003 SizeRef=3346,226 Selected=0xF994B570
DockNode ID=0x00000004 Parent=0x08BD597D SizeRef=1180,1672 Split=Y Selected=0xD2C573A7
DockNode ID=0x00000005 Parent=0x00000004 SizeRef=1351,979 Split=Y Selected=0xD2C573A7
DockNode ID=0x00000001 Parent=0x00000005 SizeRef=1180,123 Selected=0x355F9D19
DockNode ID=0x00000002 Parent=0x00000005 SizeRef=1180,853 Selected=0xD2C573A7
DockNode ID=0x00000006 Parent=0x00000004 SizeRef=1351,690 Selected=0xF930105C
DockSpace ID=0xC0DFADC4 Pos=0,51 Size=2560,1511 CentralNode=1 Selected=0x5E5F7166

24
lib/Base/Allocator.cpp Normal file
View File

@ -0,0 +1,24 @@
// Allocate `size` bytes from whichever allocator is installed on the
// current thread context.
void* internal_alloc (s64 size) {
    Allocator a = context_allocator();
    return a.proc(Allocator_Mode::ALLOCATE, size, 0, nullptr, a.data);
}
// #NOTE: internal_realloc does NOT copy anything! It just hands you new memory to work with!
void* internal_realloc (void* memory, s64 size, s64 old_size) {
Allocator allocator = context_allocator();
void* result = allocator.proc(Allocator_Mode::RESIZE, size, old_size, memory, allocator.data);
return result;
}
void internal_free (void* memory) {
Allocator allocator = context_allocator();
allocator.proc(Allocator_Mode::DEALLOCATE, 0, 0, memory, allocator.data);
}
// Logs the DETAILS description string of the allocator currently installed
// on the thread context. Requires that an allocator proc is installed.
void print_context_allocator () {
    Assert(thread_context()->allocator.proc);
    // DETAILS ignores size/memory arguments and returns a description string.
    char* result = (char*)thread_context()->allocator.proc(Allocator_Mode::DETAILS, 0, 0, nullptr, thread_context()->allocator.data);
    log_none("Current allocator details: %s", result);
}

125
lib/Base/Allocator.h Normal file
View File

@ -0,0 +1,125 @@
#pragma once
// When poisoning is enabled (debug builds with ALLOCATOR_DEBUG_MODE), fresh
// allocations are filled with 0xCD so reads of uninitialized memory stand out.
#define ALLOCATOR_DEBUG_MODE 1
#define ALLOCATOR_POISON_MEMORY_ON_ALLOCATION \
    (BUILD_DEBUG && ALLOCATOR_DEBUG_MODE)
#if ALLOCATOR_POISON_MEMORY_ON_ALLOCATION
#define ALLOCATOR_INIT_VALUE 0xCD
#else
#define ALLOCATOR_INIT_VALUE 0
#endif
// Request codes understood by every Allocator_Proc implementation.
enum class Allocator_Mode: s32 {
    ALLOCATE = 0,
    RESIZE = 1,
    DEALLOCATE = 2,
    // IS_THIS_YOURS = 3,
    DETAILS = 4, // #who_am_i -- proc returns a description string
};
// Signature every allocator follows; interpretation of the size/memory
// arguments depends on `mode` (see Allocator_Mode).
typedef void* (*Allocator_Proc)(Allocator_Mode mode, s64 requested_size, s64 old_size, void* old_memory, void* allocator_data);
// Type-erased allocator handle: a proc plus an opaque data pointer handed
// back to the proc on every call.
struct Allocator {
    Allocator_Proc proc;
    void* data;
    // `!alloc` is true when no proc is installed.
    bool operator ! () {
        return (proc == nullptr);
    }
    bool operator==(const Allocator& other) const {
        return proc == other.proc && data == other.data;
    }
    bool operator!=(const Allocator& other) const {
        return !(*this == other);
    }
};
// Public Allocator API:
// Note that alignment is handled on a per-allocator basis.
void* internal_alloc (s64 size);
void internal_free (void* memory);
void* internal_realloc (void* memory, s64 size, s64 old_size);
// Allocate one T from `allocator`; default-constructs it in place unless
// initialize == false (caller then owns raw, uninitialized storage).
template <typename T> T* New (Allocator allocator, bool initialize=true) {
    T* result = (T*)allocator.proc(Allocator_Mode::ALLOCATE, sizeof(T), 0, nullptr, allocator.data);
    if (initialize) *result = T();
    return result;
}
// Same as New(Allocator, ...) but pulls the allocator from the thread context.
template <typename T> T* New (bool initialize=true) {
    T* result = (T*)internal_alloc(sizeof(T));
    if (initialize) *result = T();
    return result;
}
// For raw-pointer arrays.
// Allocates `count` contiguous Ts from `allocator`; default-constructs each
// element unless initialize == false.
template <typename T> T* NewArray (Allocator allocator, s64 count, bool initialize=true) {
    T* result = (T*)allocator.proc(Allocator_Mode::ALLOCATE, count * sizeof(T), 0, nullptr, allocator.data);
    if (initialize) {
        for (s64 index = 0; index < count; index += 1) result[index] = T();
    }
    return result;
}
// Context-allocator variant of NewArray(Allocator, ...).
template <typename T> T* NewArray (s64 count, bool initialize=true) {
    T* result = (T*)internal_alloc(count * sizeof(T));
    if (initialize) {
        for (s64 index = 0; index < count; index += 1) result[index] = T();
    }
    return result;
}
// Likely will rarely be used, if ever. See: internal_realloc
// #NOTE: `initialize` is currently unused; kept for signature compatibility.
template <typename T> force_inline T* Resize (Allocator allocator, void* memory, s64 size, s64 old_size, bool initialize=true) {
    void* result = allocator.proc(Allocator_Mode::RESIZE, size, old_size, memory, allocator.data);
    // #FIX: the proc returns void*; the original returned it directly from a
    // T*-returning function, which fails to compile once instantiated.
    return (T*)result;
}
// There's not really any reason for this to be a template
// Hands `memory` back to `allocator`; no size is passed (DEALLOCATE ignores it).
force_inline void Delete (Allocator allocator, void* memory) {
    allocator.proc(Allocator_Mode::DEALLOCATE, 0, 0, memory, allocator.data);
}
// We use internal functions when we assume the user just wants to use the
// current allocator on the context.
// For Resizes and Deletes, use internal_realloc and internal_free.
// #NOTE: Initialize<T> and reset_struct<T> are exactly the same!
// Both re-run T's default constructor over existing storage.
template <typename T> force_inline void Initialize (T* memory) { (*memory) = T(); }
template <typename T> force_inline void reset_struct (T* src) { (*src) = T(); }
// Raw byte-fill helpers -- these bypass constructors entirely.
template <typename T> void zero_struct (T* src) {
    memset(src, 0, sizeof(T));
}
template <typename T> void poison_struct (T* src) {
    memset(src, 0xCD, sizeof(T)); // same 0xCD pattern as ALLOCATOR_INIT_VALUE
}
// Shallow-copies *src into a fresh T allocated from the context allocator.
// #FIX: the original forgot `return dst;`, so callers received an
// indeterminate pointer (undefined behavior).
template <typename T> T* copy_struct (T* src) {
    T* dst = New<T>(false);
    memcpy(dst, src, sizeof(T));
    return dst;
}
// #TODO: Turn this into a macro that also provides the
// file, line number to the print.
void print_context_allocator ();

372
lib/Base/Arena.cpp Normal file
View File

@ -0,0 +1,372 @@
#include "Arena.h"
#include "Arena_Windows.cpp"
// Debug-only bookkeeping: records the largest usage this arena has ever
// reached. Compiles to nothing in release builds.
force_inline void update_high_water_mark (Arena* arena) {
#if BUILD_DEBUG
    s64 current_usage = arena_usage_bytes(arena);
    if (current_usage > arena->high_water_mark) {
        arena->high_water_mark = current_usage;
    }
#endif
}
// For arrays, use `Array`, which is backed by the general purpose allocator
// or use `ArenaArray` if you need to expand the size to an unknown size.
// Allocator_Proc implementation that services requests from an Arena;
// `allocator_data` must point at the Arena itself.
void* arena_allocator_proc (Allocator_Mode mode, s64 requested_size, s64 old_size, void* old_memory, void* allocator_data) {
    Arena* arena = (Arena*)allocator_data;
    Assert(arena != nullptr);
    switch (mode) {
        case Allocator_Mode::ALLOCATE: {
            void* new_memory = arena_alloc(arena, requested_size);
            update_high_water_mark(arena);
            return new_memory;
        } break;
        case Allocator_Mode::RESIZE: {
            // Assert(false); // DO NOT USE RESIZE WITH ARENAS! :ArenaResizing
            // #TODO: Resize should check if current_point matches the end of the old allocation?
            // and resize accordingly + pass back same pointer. Otherwise just make a new allocation and copy.
            void* result = arena_alloc(arena, requested_size);
            s64 size_to_copy = old_size < requested_size ? old_size : requested_size;
            if (result && size_to_copy) {
                memcpy(result, old_memory, size_to_copy);
            }
            update_high_water_mark(arena);
            return result;
        } break;
        case Allocator_Mode::DEALLOCATE: {
            return nullptr; // unused -- arenas release memory in bulk, not per allocation
        } break;
        case Allocator_Mode::DETAILS: {
            // #FIX: cast the string literals -- `const char[]` does not convert
            // to `void*` in conforming C++ (it only compiled as an MSVC extension).
            if (allocator_data == nullptr) {
                return (void*)"arena_allocator_proc: data pointer is null!";
            }
            return (void*)"arena_allocator_proc";
        } break;
    }
    return nullptr;
}
// See arena_delete for decommissioning of arenas initialized with arena_init
// #NOTE: Don't use arena_init! use bootstrap_arena with a label!
// Reserves virtual address space for `arena` and commits the first
// `default_commit_page_count` pages. No-op if the arena is already valid.
void arena_init (Arena* arena, Arena_Reserve new_reserve, s32 default_commit_page_count) {
    Assert(arena != nullptr);
    s64 commit_size = default_commit_page_count * PLATFORM_MEMORY_PAGE_SIZE;
    Assert(commit_size >= 0);
    if (!is_valid(arena)) {
        arena->reserve_size = new_reserve;
        platform_init(arena, reserve_size(arena)); // OS-level reserve (see Arena_Windows.cpp)
        arena->current_point = arena_start(arena);
        arena->first_uncommitted_page = arena->memory_base;
        if (arena_commit_first_pages(arena, commit_size)) {
            arena->initial_commit_page_count = default_commit_page_count;
            // arena_lock_pages(arena, arena->memory_base, commit_size);
        }
        arena->alignment = ARENA_DEFAULT_ALIGNMENT;
        arena->flags = Arena_Flags::None;
    }
}
// Commits at least `commit_size` bytes (rounded up to the arena's alignment,
// minimum one page) starting `start_offset` bytes past the arena's base.
// Returns false for malformed requests or when the total would exceed the
// reservation.
bool arena_commit_first_pages (Arena* arena, s64 commit_size, s64 start_offset) {
    Assert(start_offset >= 0); // should NOT be negative.
    if (commit_size < 0 || start_offset < 0) {
        return false;
    }
    if (commit_size == 0) {
        commit_size = PLATFORM_MEMORY_PAGE_SIZE; // always commit one page!
    }
    u8* final_address = Align<u8*>(arena->memory_base + commit_size + start_offset,
                                   arena->alignment);
    s64 commit_size_final = (s64)(final_address - arena->memory_base);
    if (commit_size_final > reserve_size(arena)) {
        Assert(false); // Commit size exceeds reserve size
        return false;
    }
    // Only grow -- never shrink the committed region here.
    if (final_address > arena->first_uncommitted_page) {
        extend_committed_pages(arena, final_address);
    }
    return true;
}
// Arena* bootstrap_arena (Arena_Reserve new_reserve, s32 default_commit_page_count) {
// Creates a self-contained arena: the Arena header itself lives at the base
// of the reserved block ("bootstrapped"), so releasing that one block frees
// everything. Call via the bootstrap_arena macro, which fills in the debug
// location arguments.
Arena* bootstrap_arena_internal (Arena_Reserve new_reserve, s32 default_commit_page_count,
                                 string label, string file_path, string function_name, s32 line_number) {
    // + Save thread ID/name MAKE A COPY OBVIOUSLY! + PUSH default_allocator!
    // WE USE default_allocator because this arena may be used to back an array!
    s64 commit_size = default_commit_page_count * PLATFORM_MEMORY_PAGE_SIZE;
    Assert(commit_size <= reserve_size(new_reserve));
    Arena new_arena = {};
    arena_init(&new_arena, new_reserve, default_commit_page_count);
    Arena* arena_ptr = (Arena*)new_arena.memory_base;
    if (arena_ptr == nullptr) { Assert(false); // Assert in debug mode!
        return nullptr;
    }
    // Move the stack-local header into the block it manages.
    memcpy(arena_ptr, &new_arena, sizeof(Arena));
    arena_ptr->current_point = arena_start(arena_ptr);
    arena_set_bootstrap_flag(arena_ptr);
#if BUILD_DEBUG
    // #TODO: use thread_context()->stack_trace if present instead?
    { arena_ptr->label = copy_string_untracked(label);
      arena_ptr->file_path = file_path;
      arena_ptr->function_name = function_name;
      arena_ptr->line_number = line_number;
      add_arena_to_in_use_list(arena_ptr);
    }
#endif
    return arena_ptr;
}
// Flag helpers -- see Arena_Flags in Arena.h for the bit meanings.
void arena_clear_flags (Arena* arena) { arena->flags = Arena_Flags::None; }
void arena_set_chained_flag (Arena* arena) { arena->flags |= Arena_Flags::Chained; }
bool arena_is_chained (Arena* arena) { return (bool)(arena->flags & Arena_Flags::Chained); }
void arena_set_bootstrap_flag (Arena* arena) { arena->flags |= Arena_Flags::Is_Bootstrapped; }
bool arena_is_bootstrapped (Arena* arena) { return (bool)(arena->flags & Arena_Flags::Is_Bootstrapped); }
// void arena_set_secure_flag (Arena* arena) { arena->flags |= Arena_Flags::Secure_Arena; }
// Rewind the arena without returning any committed pages to the OS.
void arena_reset_keeping_memory (Arena* arena) {
    if (!is_valid(arena)) return;
    arena->current_point = arena_start(arena);
}
// Rewind the arena and decommit everything beyond the initial page count.
void arena_reset (Arena* arena) {
    if (!is_valid(arena)) return;
    arena->current_point = arena_start(arena);
    free_pages_down_to(arena, arena->initial_commit_page_count);
}
// Like arena_reset, but first scrubs the used range via `wipe_function`
// (useful for buffers that held sensitive data).
void arena_reset_overwriting_memory (Arena* arena, Memory_Wipe_Function wipe_function) {
    if (!is_valid(arena)) return;
    Assert(wipe_function != nullptr);
    if (wipe_function == nullptr) return; // release-build guard (Assert may be a no-op)
    wipe_function(arena_start(arena), (u64)(arena->current_point - arena_start(arena)));
    arena_reset(arena);
}
// Bump-allocates `byte_count` bytes aligned to arena->alignment, committing
// further pages on demand. Lazily initializes the arena on first use with its
// stored reserve_size.
void* arena_alloc (Arena* arena, s64 byte_count) {
    Assert(arena != nullptr);
    if (!arena->memory_base) { arena_init(arena, arena->reserve_size); }
    Assert(is_valid(arena));
    u8* result = Align<u8*>(arena->current_point, arena->alignment);
    u8* result_end = result + byte_count;
    if (result_end > arena->first_uncommitted_page) {
        if (result_end > arena_address_limit(arena)) {
            // Past the reservation: a plain Arena cannot grow further.
            printf("[Error] Failed to allocate because Arena is full and cannot expand!\n");
            Assert(false); // Failed to allocate because arena is full and cannot expand!
        } else {
            extend_committed_pages(arena, result_end);
        }
    }
    arena->current_point = result_end;
    return result;
}
// First usable byte: skips the Arena header that bootstrapped arenas keep at
// the base of their own block.
u8* arena_start (Arena* arena) {
    // This assumes every arena is bootstrapped, so there may be some
    // wastage/cache misses caused by this!
    return Align(arena->memory_base + sizeof(Arena), ARENA_DEFAULT_ALIGNMENT);
}
// One past the last reservable byte.
u8* arena_address_limit (Arena* arena) {
    return (arena->memory_base + reserve_size(arena));
}
// Usable once the arena has a backing reservation.
bool is_valid (Arena* arena) {
    return (arena != nullptr) && (arena->memory_base != nullptr);
}
// Reserved (not necessarily committed) virtual size of this arena.
s64 reserve_size (Arena* arena) {
    return reserve_size(arena->reserve_size);
}
// Maps an Arena_Reserve class to its byte count (mirrors Arena_Sizes).
s64 reserve_size (Arena_Reserve ar) {
    switch (ar) {
        case Arena_Reserve::Size_64K: return KB(64);
        case Arena_Reserve::Size_2M: return MB(2);
        case Arena_Reserve::Size_64M: return MB(64);
        case Arena_Reserve::Size_2G: return GB(2);
        case Arena_Reserve::Size_64G: return GB(64);
        case Arena_Reserve::Size_2T: return TB(2);
        // #FIX: Size_64T was missing, so any arena promoted to the largest
        // class (see next_reserve_size) hit the Assert below.
        case Arena_Reserve::Size_64T: return TB(64);
    }
    Assert(false); // This should not happen.
    return 0;
}
// Smallest reserve class that can hold `size` bytes; falls back to the
// largest class (Size_64T) when nothing in the table fits.
Arena_Reserve next_reserve_size (s64 size) {
    for (u8 i = 0; i < Arena_Reserve_Count; i += 1) {
        if (size <= Arena_Sizes[i]) {
            return (Arena_Reserve)i; // enum values are indices into Arena_Sizes
        }
    }
    return Arena_Reserve::Size_64T;
}
// arena_usage_bytes is kinda pointless tbh.
// Bytes handed out so far (arena header excluded).
s64 arena_usage_bytes (Arena* arena) { return (s64)(arena->current_point - arena_start(arena)); }
// Bytes actually committed by the OS (header included).
s64 arena_usage_committed_bytes (Arena* arena) { return (s64)(arena->first_uncommitted_page - arena->memory_base); }
// for arena details, I need to setup my string builder first.
// Wraps an arena in the generic Allocator interface.
Allocator allocator (Arena* arena) {
    return { arena_allocator_proc, arena };
}
// Scope guard: captures the arena's current_point on construction and rewinds
// to it on destruction. Does NOT decommit pages (contrast with Auto_Release).
struct Auto_Reset {
    Arena* arena;
    u8* starting_point;
    Auto_Reset(Arena* arena) {
        Assert(is_valid(arena));
        this->arena = arena;
        this->starting_point = arena->current_point;
    }
    // #TODO: Implement with ExpandableArena (probably just use the same implementation as Auto_Release?)
    // Auto_Reset(ExpandableArena* arena_ex) {
    //
    // }
    ~Auto_Reset() {
        arena->current_point = starting_point;
    }
};
// Scope guard like Auto_Reset, but on destruction also returns excess
// committed pages to the OS. Accepts either a plain Arena or an
// ExpandableArena (for the latter it rewinds the chain via arena_reset_to).
struct Auto_Release {
    bool is_expandable;
    ExpandableArena* arena_ex;
    Arena* arena;
    u8* starting_point;
    Auto_Release(ExpandableArena* arena_ex) {
        this->arena_ex = arena_ex;
        this->arena = arena_ex->current; // remember which link of the chain we were on
        this->starting_point = arena_ex->current->current_point;
        this->is_expandable = true;
    }
    Auto_Release(Arena* arena) {
        this->arena_ex = nullptr;
        this->arena = arena;
        this->starting_point = arena->current_point;
        this->is_expandable = false;
        Assert(is_valid(arena));
    }
    ~Auto_Release() {
        if (is_expandable) {
            arena_reset_to(arena_ex, arena, starting_point);
        } else {
            arena->current_point = starting_point;
            free_pages_down_to(arena, arena->initial_commit_page_count);
        }
    }
};
// #FixedArena procedures:
// Allocates one fully-committed block of `size` bytes and places the
// FixedArena header at its start; the remainder is the bump region.
// NOTE(review): `push_allocator(...)` is used as a bare statement -- if
// push_allocator is an RAII scope type, this temporary is destroyed
// immediately and has no effect on the ArrayView allocation below. Confirm
// it is a macro or that the statement is intentional.
FixedArena* bootstrap_fixed_arena (s64 size, Allocator backing_allocator) {
    push_allocator(backing_allocator);
    Assert(size >= sizeof(FixedArena));
    ArrayView<u8> memory = ArrayView<u8>(size);
    FixedArena* result = (FixedArena*)memory.data;
    result->memory = memory;
    result->cursor = sizeof(FixedArena); // header occupies the front of the block
    result->allocator = backing_allocator;
    return result;
}
// Returns the FixedArena's whole block to its backing allocator.
force_inline void destroy_arena (FixedArena* arena) {
    Delete(arena->allocator, arena);
}
// Wraps a FixedArena in the generic Allocator interface.
Allocator allocator (FixedArena* arena) {
    return { fixed_arena_allocator_proc, arena };
}
// Valid once it owns a backing block.
bool is_valid (FixedArena* arena) {
    if (!arena) return false;
    return is_valid(arena->memory);
}
// Allocator_Proc backed by a FixedArena: a simple bump allocator over one
// pre-committed block. `allocator_data` must point at the FixedArena.
void* fixed_arena_allocator_proc (Allocator_Mode mode, s64 requested_size, s64 old_size, void* old_memory, void* allocator_data) {
    constexpr s64 DEFAULT_ALIGNMENT = 16; // maybe make this modifiable as part of FixedArena struct?
    FixedArena* arena = (FixedArena*)allocator_data;
    Assert(arena != nullptr);
    switch (mode) {
        case Allocator_Mode::ALLOCATE: {
            arena->cursor = Align<s64>(arena->cursor, DEFAULT_ALIGNMENT);
            void* result = &arena->memory[arena->cursor];
            arena->cursor += requested_size;
            Assert(arena->cursor <= arena->memory.count); // out of space -> hard stop in debug
            return result;
        } break;
        case Allocator_Mode::RESIZE: {
            // Resize is a fresh allocation plus a copy of the surviving bytes.
            arena->cursor = Align<s64>(arena->cursor, DEFAULT_ALIGNMENT);
            void* result = &arena->memory[arena->cursor];
            arena->cursor += requested_size;
            Assert(arena->cursor <= arena->memory.count);
            s64 size_to_copy = old_size < requested_size ? old_size : requested_size;
            if (result && size_to_copy) { memcpy(result, old_memory, size_to_copy); }
            return result;
        } break;
        case Allocator_Mode::DEALLOCATE: {
            return nullptr; // unused -- the whole block is released via destroy_arena
        } break;
        case Allocator_Mode::DETAILS: {
            // #FIX: cast the string literals -- `const char[]` does not convert
            // to `void*` in conforming C++ (it only compiled as an MSVC extension).
            if (allocator_data == nullptr) {
                return (void*)"fixed_arena_allocator_proc: data pointer is null!";
            }
            return (void*)"fixed_arena_allocator_proc: with valid data";
        } break;
    }
    return nullptr;
}
// Debug-only registry of live bootstrapped arenas. Idempotent.
force_inline void initialize_arenas_in_use_list () {
#if BUILD_DEBUG
    if (arenas_in_use.allocated > 0) return; // already initialized
    mutex_init(&arenas_in_use_mutex);
    arenas_in_use.allocator = default_allocator();
    array_reserve(arenas_in_use, 256);
#endif
}
force_inline void add_arena_to_in_use_list (Arena* arena) {
#if BUILD_DEBUG
    Assert(arenas_in_use.allocated > 0); // check we initialized!
    // NOTE(review): if `lock_guard` is an RAII type, this unnamed temporary is
    // destroyed immediately and does NOT guard the call below -- confirm it is
    // a macro, or give the guard a name.
    lock_guard(&arenas_in_use_mutex);
    array_add(arenas_in_use, arena);
#endif
}
force_inline void remove_arena_from_in_use_list (Arena* arena) {
#if BUILD_DEBUG
    Assert(arenas_in_use.allocated > 0); // check we initialized!
    // NOTE(review): same unnamed-temporary concern as add_arena_to_in_use_list.
    lock_guard(&arenas_in_use_mutex);
    array_unordered_remove_by_value(arenas_in_use, arena, 1);
#endif
}

199
lib/Base/Arena.h Normal file
View File

@ -0,0 +1,199 @@
#pragma once
struct ExpandableArena; // fwd declare #temp
#if OS_WINDOWS
const u32 ARENA_DEFAULT_COMMIT_PAGE_COUNT = 16; // 16 * 4k page = 64kB
const s64 ARENA_DEFAULT_COMMIT_SIZE_BYTES = 65536;
#endif
// Default allocation alignment -- one machine register wide.
constexpr u16 ARENA_DEFAULT_ALIGNMENT = CPU_REGISTER_WIDTH_BYTES;
#define ARENA_DEBUG BUILD_DEBUG
// Reserve classes: every arena reserves exactly one of these virtual sizes.
constexpr s32 Arena_Reserve_Count = 7;
enum class Arena_Reserve: u8 {
    Size_64K = 0, // these are used as indices in Arena_Free_List!
    Size_2M = 1,
    Size_64M = 2,
    Size_2G = 3,
    Size_64G = 4,
    Size_2T = 5,
    Size_64T = 6,
};
// Byte counts for each Arena_Reserve value, indexed by the enum.
constexpr s64 Arena_Sizes[Arena_Reserve_Count] = {
    64LL * 1024,
    2LL * 1024 * 1024,
    64LL * 1024 * 1024,
    2LL * 1024 * 1024 * 1024,
    64LL * 1024 * 1024 * 1024,
    2LL * 1024 * 1024 * 1024 * 1024,
    64LL * 1024 * 1024 * 1024 * 1024,
};
// Bit flags describing how an arena was created / behaves.
enum class Arena_Flags: u8 {
    None = 0,
    Chained = 0x01,
    Is_Bootstrapped = 0x02, // header lives at the base of its own block
    Large_Pages = 0x40,
    // Secure_Arena = 0xF0 // #NOTE: Secure Arenas are not implemented yet!
};
// Bitwise helpers so Arena_Flags values can be combined and tested as a
// flag set despite being a scoped enum.
force_inline Arena_Flags operator | (Arena_Flags a, Arena_Flags b) {
    return static_cast<Arena_Flags>(static_cast<u8>(a) | static_cast<u8>(b));
}
force_inline Arena_Flags operator & (Arena_Flags a, Arena_Flags b) {
    return static_cast<Arena_Flags>(static_cast<u8>(a) & static_cast<u8>(b));
}
force_inline Arena_Flags& operator |= (Arena_Flags& a, Arena_Flags b) {
    a = a | b;
    return a;
}
force_inline Arena_Flags operator ~ (Arena_Flags a) {
    return static_cast<Arena_Flags>(~static_cast<u8>(a));
}
// Linear (bump) allocator over one reserved block of virtual memory.
// Debug builds carry extra provenance members for diagnostics.
struct Arena {
    u8* current_point = nullptr; // next allocation starts here (after alignment)
    u8* memory_base = nullptr; // base of the virtual reservation
    u8* first_uncommitted_page = nullptr; // commit boundary -- bytes below are usable
    u16 alignment = CPU_REGISTER_WIDTH_BYTES;
    Arena_Reserve reserve_size = Arena_Reserve::Size_64K;
    Arena_Flags flags = Arena_Flags::None;
    u32 initial_commit_page_count = ARENA_DEFAULT_COMMIT_PAGE_COUNT;
#if BUILD_DEBUG
    s64 high_water_mark; // peak usage observed (see update_high_water_mark)
    string label; // creation label passed to bootstrap_arena
    string file_path;
    string function_name;
    s32 line_number;
#endif
};
typedef void* (*Memory_Wipe_Function)(void* memory, u64 byte_count); // e.g. a secure memset used when wiping arena contents
// Allocator callback so an Arena can be plugged in wherever an Allocator is expected.
void* arena_allocator_proc (Allocator_Mode mode, s64 requested_size, s64 old_size, void* old_memory, void* allocator_data);
// Main API
#if BUILD_DEBUG
// Debug builds capture the callsite (file/function/line) for arena tracking.
#define bootstrap_arena(_reserve_, _label_) \
    bootstrap_arena_internal((_reserve_), (s32)ARENA_DEFAULT_COMMIT_PAGE_COUNT, (_label_), __FILE__, __FUNCTION__, __LINE__)
#else
#define bootstrap_arena(_reserve_, _label_) \
    bootstrap_arena_internal((_reserve_), (s32)ARENA_DEFAULT_COMMIT_PAGE_COUNT, (_label_))
#endif
// Reserves address space and places the Arena struct at the start of its own reservation.
Arena* bootstrap_arena_internal (Arena_Reserve new_reserve, s32 commit_page_count=ARENA_DEFAULT_COMMIT_PAGE_COUNT,
    string label="", string file_path="", string function_name="", s32 line_number=0);
void arena_init (Arena* arena, Arena_Reserve new_reserve, s32 commit_page_count=16); // For when we're *not* bootstrapping arenas: (I'm debating if we should keep this..)
bool arena_commit_first_pages (Arena* arena, s64 commit_size, s64 start_offset=0); // This is useful for initializing arenas (arena_init), and for starting Arena-backed arrays.
void arena_clear_flags (Arena* arena);
void arena_set_bootstrap_flag (Arena* arena);
void arena_set_secure_flag (Arena* arena);
bool arena_is_bootstrapped (Arena* arena);
void arena_reset_keeping_memory (Arena* arena); // just sets current point to arena_start
void arena_reset (Arena* arena); // frees excess pages
void arena_reset_overwriting_memory (Arena* arena, Memory_Wipe_Function wipe_function); // presumably wipes contents via wipe_function before resetting — confirm against implementation
// Internal API (should not be called directly by program)
void* arena_alloc (Arena* arena, s64 byte_count);
// Utilities
u8* arena_start (Arena* arena);
u8* arena_address_limit (Arena* arena); // end of the reserved address range
s64 arena_usage_bytes (Arena* arena);
s64 arena_usage_committed_bytes (Arena* arena);
// arena_details (requires print)
s64 reserve_size (Arena* arena);
s64 reserve_size (Arena_Reserve ar);
bool is_valid (Arena* arena);
Allocator allocator (Arena* arena);
// Platform-Specific Implementations (forward-declared)
void platform_init (Arena* arena, s64 new_reserve); // reserves (does not commit) the address range
void extend_committed_pages (Arena* arena, u8* end); // commits pages so that `end` becomes addressable
void free_pages_down_to (Arena* arena, s64 pages_to_keep); // decommits everything beyond the first pages_to_keep pages
void arena_delete (Arena* arena); // releases the entire reservation
Arena_Reserve next_reserve_size (s64 size); // NOTE(review): presumably the smallest reserve bucket that fits `size` — confirm
// RAII guard: temporarily overrides an arena's alignment and restores the
// previous value when the guard leaves scope.
struct Push_Alignment {
    Arena* arena;
    u16 original_alignment;
    Push_Alignment(Arena* arena, u16 alignment) {
        Assert(is_valid(arena));
        this->arena = arena;
        this->original_alignment = arena->alignment;
        this->arena->alignment = alignment;
    }
    // #Fix: the original body *called* Push_Alignment(...) as a statement, which
    // constructs and immediately destroys a temporary (so the alignment was
    // restored right away) and left this->arena / original_alignment
    // uninitialized — the destructor then wrote through a garbage pointer.
    // A delegating constructor is the correct form.
    Push_Alignment(ExpandableArena* arena_ex, u16 alignment)
        : Push_Alignment((Arena*)arena_ex, alignment) {}
    ~Push_Alignment() {
        arena->alignment = original_alignment;
    }
};
// Do this later:
// arena_lock_pages :: (using arena: *Arena, start_address: *u8, byte_count: s64)
// arena_unlock_pages :: (using arena: *Arena, start_address: *u8, byte_count: s64)
// #FixedArena is a super simple arena where you allocate a fixed block up front (fully committed),
// and use it as-is.
// #NOTE: we can save space by always backing with a known allocator (e.g. default_allocator()).
struct FixedArena {
    ArrayView<u8> memory;   // the single fixed, fully-committed block
    s64 cursor;             // bump offset into `memory`
    Allocator allocator;    // backing allocator that owns `memory`
};
// Allocator callback for FixedArena (bump allocation, no growth).
void* fixed_arena_allocator_proc (Allocator_Mode mode, s64 requested_size, s64 old_size, void* old_memory, void* allocator_data);
// #FixedArena API
FixedArena* bootstrap_fixed_arena (s64 size, Allocator backing_allocator = { default_allocator_proc, nullptr });
force_inline void destroy_arena (FixedArena* arena);
Allocator allocator (FixedArena* arena);
bool is_valid (FixedArena* arena);
// Sums the live usage of every arena in the view.
// Does not include overhead from committed pages!
s64 bytes_in_use (ArrayView<Arena*> arenas) {
    s64 total = 0;
    for (s64 index = 0; index < arenas.count; index += 1)
        total += arena_usage_bytes(arenas[index]);
    return total;
}
// Sums the committed-page footprint of every arena in the view.
s64 committed_bytes (ArrayView<Arena*> arenas) {
    s64 total = 0;
    for (s64 index = 0; index < arenas.count; index += 1)
        total += arena_usage_committed_bytes(arenas[index]);
    return total;
}
#if BUILD_DEBUG
// Debug-only registry of every live arena, for leak/usage reporting.
global Mutex arenas_in_use_mutex;       // guards arenas_in_use
global Array<Arena*> arenas_in_use;
force_inline void initialize_arenas_in_use_list ();
force_inline void add_arena_to_in_use_list(Arena* arena);
force_inline void remove_arena_from_in_use_list (Arena* arena);
#endif

324
lib/Base/Arena_Array.h Normal file
View File

@ -0,0 +1,324 @@
#pragma once
// Byte offset of the element payload inside an ArenaArray's backing arena;
// the header region holds the bootstrapped Arena plus the array struct
// (see the static_assert below).
#if BUILD_DEBUG
constexpr s64 ARRAY_ARENA_START_OFFSET = 2 * CPU_CACHE_LINE_SIZE; // sizeof(Arena)+sizeof(array)
#else
constexpr s64 ARRAY_ARENA_START_OFFSET = 1 * CPU_CACHE_LINE_SIZE; // double check this lol
#endif
template <typename T>
struct ArenaArray { // #downcasts to an ArrayView.
    using ValueType = T;
    // First two fields mirror ArrayView<T>'s (count, data) layout so the
    // downcast works.
    s64 count;       // number of live items
    T* data;         // element storage at a fixed offset inside `arena`
    s64 allocated;   // item capacity currently reserved/committed
    Arena* arena;    // backing arena (array grows by committing more pages)
    ArenaArray() {
        memset(this, 0, sizeof(*this));
    }
    T& operator[] (s64 index) {
#if ARRAY_ENABLE_BOUNDS_CHECKING
        if (index < 0 || index >= count) { debug_break(); } // index out of bounds
#endif
        return static_cast<T*>(data)[index];
    }
};
// Bytes in use: header region (arena + array struct) plus the live payload.
template <typename T> s64 arena_array_usage_bytes (ArenaArray<T>* array) {
    s64 payload_bytes = array->count * (s64)sizeof(T);
    return ARRAY_ARENA_START_OFFSET + payload_bytes;
}
static_assert(ARRAY_ARENA_START_OFFSET >= sizeof(Arena) + sizeof(ArenaArray<u8>));
// #NOTE: I am not defining arena_array_init (ArenaArray<T>*), because I do not want to
// encourage its usage!
// This may not be correct - check!
// Initializes an already-existing ArenaArray struct with a fresh bootstrapped
// arena and room for `preallocate_count` items (prefer arena_array_new).
template <typename T> void arena_array_init (ArenaArray<T>* array, Arena_Reserve reserve_size, s64 preallocate_count, string label="") {
    string arena_label = format_string(temp(), "ArenaArray:init (%s)", label.data);
    array->arena = bootstrap_arena(reserve_size, arena_label);
    push_arena(array->arena);
    push_alignment(array->arena, 1); // elements are tightly packed
    s64 commit_size_bytes = preallocate_count * sizeof(T);
    // Payload lives past the header region; only record capacity if the commit succeeded.
    if (arena_commit_first_pages(array->arena, commit_size_bytes, ARRAY_ARENA_START_OFFSET)) {
        array->allocated = preallocate_count;
    }
    array->count = 0;
    array->data = array_start<T>(*array);
}
// Use arena_array_free to delete, see: array_reset and reset_keeping_memory for resetting.
template <typename T>
ArenaArray<T>* arena_array_new (s64 preallocate_count, Arena_Reserve reserve_size, string label="") {
string arena_label = format_string(temp(), "ArenaArray:new (%s)", label.data);
Arena* arena = bootstrap_arena(reserve_size, arena_label);
push_arena(arena);
push_alignment(arena, 1);
ArenaArray<T>* array = New<ArenaArray<T>>(true);
array->arena = arena;
s64 commit_size_bytes = preallocate_count * sizeof(T);
if (arena_commit_first_pages(array->arena, commit_size_bytes, ARRAY_ARENA_START_OFFSET)) {
array->allocated = preallocate_count;
}
array->count = 0;
array->arena = arena;
array->data = array_start<T>(*array);
return array;
}
// Element storage always sits at a fixed offset past the arena's header region.
template <typename T> T* array_start (ArenaArray<T>& array) {
    T* memory = (T*)(array.arena->memory_base + ARRAY_ARENA_START_OFFSET);
    // NOTE(review): presumably checks the arena cursor has advanced past the
    // header (i.e. the header allocations happened) — confirm intent.
    Assert((u8*)memory >= (u8*)array.arena->current_point);
    return memory;
}
// Maximum payload bytes this array can ever hold: reserved range minus header.
// Takes the type-erased ArenaArray<u8> view (see array_arena_realloc).
s64 max_array_size (ArenaArray<u8>& array) {
    u8* address_limit = array.arena->memory_base + reserve_size(array.arena);
    u8* address_start = array_start(array);
    return (s64)(address_limit - address_start);
}
// Valid iff the array exists, has a backing arena, and that arena is valid.
template <typename T> bool is_valid (ArenaArray<T>* array) {
    return (array != nullptr)
        && (array->arena != nullptr)
        && is_valid(array->arena);
}
// Empty when there are no items, or when the backing arena is unusable.
template <typename T> bool is_empty (ArenaArray<T>& array) {
    if (!is_valid(array.arena)) return true;
    return array.count == 0;
}
// Committed-page footprint of the array's backing arena.
template <typename T> s64 memory_usage (ArenaArray<T>& array) {
    // #Fix: the original compared the reference itself against nullptr
    // (`array == nullptr`), which does not compile for a struct reference;
    // check the backing arena instead.
    if (array.arena == nullptr) return 0;
    return arena_usage_committed_bytes(array.arena);
}
// Releases the whole backing arena; the array struct lives inside it, so the
// array itself is gone after this call.
template <typename T> void arena_array_free (ArenaArray<T>& array) {
    arena_delete(array.arena);
}
// Non-owning view over the array's live items.
template <typename T> ArrayView<T> array_view (ArenaArray<T> array) {
    return ArrayView<T>(array.count, array.data);
}
// Non-owning view over the array's live items.
template <typename T> ArrayView<T> to_view (ArenaArray<T>& array) {
    return ArrayView<T>(array.count, array.data);
}
// View of up to `count` items starting at `start_offset`; clamps to the live
// range and returns an empty view if start_offset is past the end.
template <typename T> ArrayView<T> to_view (ArenaArray<T>& array, s64 start_offset, s64 count) {
    Assert(start_offset >= 0); Assert(count >= 0);
    ArrayView<T> av = { 0, nullptr };
    if (start_offset >= array.count) {
        return av; // empty
    }
    av.count = count;
    av.data = array.data + start_offset;
    if (start_offset + count > array.count) {
        // #Fix: the original referenced an undefined name `offset` here.
        av.count = array.count - start_offset;
    }
    return av;
}
// Appends the contents of `view` to the end of `array`.
template <typename T> void copy_from_view (ArenaArray<T>* array, ArrayView<T> view) {
    // `start` is captured before the resize; this is safe only because arena
    // arrays never relocate their data pointer (pages are committed in place).
    T* start = array->data + array->count;
    s64 new_count = array->count + view.count;
    array_resize(*array, new_count, false);
    memcpy(start, view.data, view.count * sizeof(T));
}
// Appends every element of `items`, growing the backing arena if needed.
// (Arena arrays never move `data`, so the write target is stable across growth.)
template <typename T> void array_add (ArenaArray<T>& array, ArrayView<T> items) {
    s64 needed = array.count + items.count;
    if (needed > array.allocated) {
        array_reserve(array, needed);
    }
    memcpy(&array.data[array.count], items.data, items.count * sizeof(T));
    array.count = needed;
}
// Appends one item, growing if the array is full.
template <typename T> void array_add (ArenaArray<T>& array, T item) {
    maybe_grow(array);
    array.data[array.count++] = item;
}
// Appends a default-constructed item and returns a pointer to it.
template <typename T> T* array_add (ArenaArray<T>& array) {
    maybe_grow(array);
    T* slot = array.data + array.count;
    *slot = T();
    array.count += 1;
    return slot;
}
// Doubles capacity (minimum 8 items) when the array is full.
template <typename T> force_inline void maybe_grow (ArenaArray<T>& array) {
    if (array.count < array.allocated) return;
    s64 target = 2 * array.allocated;
    if (target < 8) target = 8; // so we don't try to reserve 0 bytes.
    reserve_internal((ArenaArray<u8>&)array, target, sizeof(T));
}
// Typed wrapper over the type-erased reserve path; no-op if capacity suffices.
template <typename T> force_inline void array_reserve (ArenaArray<T>& array, s64 desired_item_count) {
    reserve_internal((ArenaArray<u8>&)array, desired_item_count, sizeof(T));
}
// Grows (never shrinks capacity) to `desired_item_count` items; optionally
// default-constructs the newly exposed range.
template <typename T> void array_resize (ArenaArray<T>& array, s64 desired_item_count, bool initialize=true) {
    s64 old_count = array.count;
    reserve_internal((ArenaArray<u8>&)array, desired_item_count, sizeof(T));
    array.count = desired_item_count;
    if (initialize) {
        init_range(array.data, old_count, desired_item_count);
    }
}
// Ensures the backing arena has pages committed for `new_size` payload bytes.
// Data never moves; growth happens by committing more of the reservation.
void array_arena_realloc (ArenaArray<u8>& array, s64 new_size, s64 old_size) {
    Assert(new_size <= max_array_size(array));
    u8* array_begin = (u8*)array.data;
    void* result_end = (void*)(array_begin + new_size);
    // Check if we need more pages:
    if (result_end > array.arena->first_uncommitted_page) {
        // Critical error if we run out of address space!
        if (result_end > arena_address_limit(array.arena)) {
            printf("[Error] Failed to allocate because Arena is full and cannot expand!\n");
            Assert(false); // Failed to allocate because Arena is full and cannot expand
            return;
        }
        extend_committed_pages(array.arena, (u8*)result_end);
    }
}
// Type-erased reserve: commits enough pages for `desired_item_count` items of
// `element_size` bytes each. No-op when capacity already suffices.
void reserve_internal (ArenaArray<u8>& array, s64 desired_item_count, s64 element_size) {
    if (desired_item_count <= array.allocated) return;
    array_arena_realloc(array, desired_item_count * element_size, array.allocated * element_size);
    array.allocated = desired_item_count;
}
template <typename T> void init_range (T* ptr, s64 start_offset, s64 end_offset) {
for (s64 i = start_offset; i < end_offset; i += 1) {
T* current_item = ptr + i;
(*current_item) = T(); // is this correct in-place init?
}
}
// Debug aid: fills `count` items starting at `start` with 0xCD so stale reads
// are obvious. Compiled out of release builds.
template <typename T> void array_poison_range (ArenaArray<T>& array, s64 start, s64 count) {
#if BUILD_DEBUG
    if (count == 0) return;
    Assert(start >= 0 && start < array.count);
    Assert(start + count <= array.count);
    // Check that these ranges make sense
    T* start_address = &array[start];
    memset(start_address, 0xCD, count * sizeof(T));
#endif
}
// Empties the array and lets the backing arena free excess pages; capacity is
// recomputed from what remains committed.
template <typename T> force_inline void array_reset (ArenaArray<T>& array) {
    // reset backing arena:
    arena_reset(array.arena);
    array.count = 0;
    // should be # of bytes committed / sizeof(T):
    // #Fix: subtracting a T* from a u8* is ill-formed; cast data to u8* first.
    s64 committed_bytes = (s64)(array.arena->first_uncommitted_page - (u8*)array.data);
    array.allocated = committed_bytes / sizeof(T);
}
// Empties the array but keeps all committed pages (capacity unchanged).
template <typename T> force_inline void reset_keeping_memory (ArenaArray<T>& array) {
    array.count = 0;
}
// Removes the item at `index`, shifting the tail left to preserve order.
template <typename T> force_inline void ordered_remove_by_index (ArenaArray<T>& array, s64 index) {
    Assert(index >= 0); Assert(index < array.count);
    for (s64 i = index; i < array.count-1; i += 1) {
        // #Fix: the original copied data[i - 1] (shifting the wrong way and
        // reading out of bounds at i == 0); shift the successor down instead,
        // matching array_ordered_remove_by_index for Array<T>.
        array.data[i] = array.data[i + 1];
    }
    array.count -= 1;
}
// Removes the item at `index` by swapping the last item into its place (O(1),
// does not preserve order).
template <typename T> force_inline void unordered_remove_by_index (ArenaArray<T>& array, s64 index) {
    Assert(index >= 0); Assert(index < array.count);
    s64 last_index = array.count - 1;
    // #Fix: the original used Jai-style `if index != last_index { ... }`,
    // which is not valid C++; the condition needs parentheses.
    if (index != last_index) { array.data[index] = array.data[last_index]; }
    array.count -= 1;
}
// Inserts the contents of src_array into `array` at `offset`, shifting the
// tail up to make room.
template <typename T> void insert_at(ArenaArray<T>& array, s64 offset, ArenaArray<T>& src_array) {
    Assert(offset >= 0 && offset <= array.count);
    // #Fix: is_valid takes a pointer; the original passed the reference.
    if (!is_valid(&src_array)) return;
    s64 new_count = array.count + src_array.count;
    array_reserve(array, new_count);
    T* src = array.data + offset;
    T* dst = src + src_array.count;
    // #Fix: the shifted tail overlaps its destination — memmove, not memcpy.
    memmove(dst, src, (array.count - offset) * sizeof(T));
    memcpy(array.data + offset, src_array.data, src_array.count * sizeof(T));
    // #Fix: the original never updated count after inserting.
    array.count = new_count;
}
// Removes and returns the last item. Caller must ensure the array is non-empty.
template <typename T> T pop (ArenaArray<T>& array) {
    array.count -= 1;
    return array.data[array.count];
}
// Returns a copy of the last item without removing it.
template <typename T> T peek (ArenaArray<T>& array) {
    s64 last = array.count - 1;
    return array.data[last];
}
// Returns a pointer to the last item without removing it.
template <typename T> T* peek_pointer (ArenaArray<T>& array) {
    return array.data + (array.count - 1);
}
// Removes `count` items starting at `start_offset`, shifting the tail down.
template <typename T> void delete_range (ArenaArray<T>& array, s64 start_offset, s64 count) {
    Assert(start_offset >= 0 && count >= 0 && start_offset + count <= array.count);
    // #Fix: source and destination overlap when shifting within the same
    // buffer — memcpy is undefined behavior here; use memmove.
    memmove(array.data + start_offset, array.data + start_offset + count, (array.count - start_offset - count) * sizeof(T));
    array.count -= count;
}
// Deep copy into a freshly bootstrapped ArenaArray.
// #Fix: the original did not compile — it used member access on the arena
// pointer (`array.arena.reserve_size`, and reserve_size is a free function,
// not a field), called arena_array_new without its required preallocate
// count, and returned a pointer where a reference was declared.
template <typename T> ArenaArray<T>& array_copy (ArenaArray<T>& array) {
    Arena_Reserve reserve = next_reserve_size(reserve_size(array.arena));
    ArenaArray<T>* new_array = arena_array_new<T>(array.count, reserve);
    array_reserve(*new_array, array.count);
    memcpy(new_array->data, array.data, array.count * sizeof(T));
    new_array->count = array.count;
    return *new_array;
}
// NOTE(review): despite the name, this does NOT copy the elements — the view
// aliases the array's storage (compare array_copy(const ArrayView<T>&), which
// allocates). Confirm whether a real copy was intended.
template <typename T> ArrayView<T> array_copy_as_view (ArenaArray<T>& array) {
    ArrayView<T> view = { array.count, array.data };
    return view;
}

290
lib/Base/Arena_Hash_Table.h Normal file
View File

@ -0,0 +1,290 @@
// #ArenaTableConfusion I just realized ArenaTable (a hash-table backed by an Arena allocator) may be confused with Arena_Table (which is a table tracking free Arenas)
// Solution: rename Arena_Table, to Arena_Free_List.
// Should ArenaTable be bootstrapped like String_Builder?
// see: new_string_builder
// Open-addressing hash table backed by an ArenaArray of entries.
template <typename T, typename U>
struct ArenaTable {
    using KeyType = T; using ValueType = U;
    ArenaArray<Table_Entry<T, U>>* entries = {};
    s64 count = 0;               // live key/value pairs
    s64 slots_filled = 0;        // live + tombstoned slots (drives resizing)
    Hash_Function hash_function = nullptr;
    Hash_Compare_Function compare_function = nullptr;
    s64 add_collisions = 0;      // only tracked when count_collisions is set
    s64 find_collisions = 0;
    u32 load_factor_percent = 70;
    bool refill_removed = true;  // reuse tombstoned slots on insert
    bool count_collisions = false;
};
// A table is usable once entries are allocated and both callbacks are set.
template <typename T, typename U> bool table_is_valid (ArenaTable<T, U>* table) {
    if (table == nullptr) return false;
    return table->entries != nullptr
        && table->entries->allocated != 0
        && table->hash_function != nullptr
        && table->compare_function != nullptr;
}
// table_release
template <typename T, typename U> void table_init (ArenaTable<T, U>* table, s64 slots_to_allocate=64, Arena_Reserve new_reserve=Arena_Reserve::Size_64M, string label="") {
s64 n = Next_Power_Of_Two(slots_to_allocate);
string table_label = format_string(temp(), "ArenaTable (%s)", label.data);
table->entries = arena_array_new<Table_Entry<T, U>> (n, new_reserve);
array_resize(*table->entries, n, false); // don't init
for (s64 i = 0; i < n; i += 1) {
(*table->entries)[i].hash = 0;
}
// default hash and compare functions:
table->hash_function = table_hash_function_fnv1a;
table->compare_function = u64_keys_match;
}
// Adds given key value pair to the table, returns a pointer to the inserted value.
template <typename T, typename U> U* table_add (ArenaTable<T, U>* table, T key, U value) {
Assert(table_is_valid(table));
Assert(table->load_factor_percent < 100);
if ( ((table->slots_filled + 1) * 100) >= (table->entries->allocated * table->load_factor_percent) ) {
table_resize(table, Next_Power_Of_Two(table->entries->allocated + 64));
}
Assert(table->slots_filled < table->entries->allocated);
// #Walk_Table
u32 mask = (u32)(table->entries->allocated - 1);
u32 hash = table->hash_function(&key, sizeof(T));
if (hash < HASH_TABLE_FIRST_VALID_HASH) {
hash += HASH_TABLE_FIRST_VALID_HASH;
}
u32 index = hash & mask;
u32 probe_increment = 1;
u32 table_while_loop = (*table->entries)[index].hash;
while (table_while_loop) {
if (table->refill_removed) {
if ((*table->entries)[index].hash == HASH_TABLE_REMOVED_HASH) {
table->slots_filled -= 1; // 1 will get re-added below, for total increment 0.
break;
}
}
if (table->count_collisions) {
table->add_collisions += 1;
}
index = (index + probe_increment) & mask;
probe_increment += 1;
table_while_loop = (*table->entries)[index].hash;
}
// Walk_Table walked us to an unused entry, so add our new data into this slot:
table->count += 1;
table->slots_filled += 1;
Table_Entry<T, U>* entry = &(*table->entries)[index];
entry->hash = hash;
entry->key = key;
entry->value = value;
return &entry->value;
}
// Returns a pointer to the value stored under `key`, or nullptr if absent.
template <typename T, typename U> U* table_find_pointer (ArenaTable<T, U>* table, T key) {
    Assert(table_is_valid(table));
    if (!table_is_valid(table)) return nullptr;
    // #Walk_Table — must probe in exactly the same order as table_add.
    u32 mask = (u32)(table->entries->allocated - 1);
    u32 hash = table->hash_function(&key, sizeof(T));
    if (hash < HASH_TABLE_FIRST_VALID_HASH) {
        hash += HASH_TABLE_FIRST_VALID_HASH;
    }
    u32 index = hash & mask;
    u32 probe_increment = 1;
    u32 table_while_loop = (*table->entries)[index].hash;
    while (table_while_loop) { // an empty (hash == 0) slot terminates the probe
        Table_Entry<T, U>* entry = &(*table->entries)[index];
        if (entry->hash == hash) {
            if (table->compare_function(&entry->key, &key)) {
                return &entry->value;
            }
        }
        if (table->count_collisions) { table->find_collisions += 1; }
        index = (index + probe_increment) & mask;
        probe_increment += 1;
        table_while_loop = (*table->entries)[index].hash;
    }
    return nullptr;
}
// Upsert: overwrites the value if the key exists, inserts otherwise.
template <typename T, typename U> U* table_set (ArenaTable<T, U>* table, T key, U value) {
    U* existing = table_find_pointer(table, key);
    if (!existing) {
        return table_add(table, key, value);
    }
    *existing = value;
    return existing;
}
// Grows the slot array to at least `slots_to_allocate` (rounded up to a power
// of two) and re-inserts every live entry.
template <typename T, typename U> void table_resize (ArenaTable<T, U>* table, s64 slots_to_allocate) {
    s64 initial_count = table->entries->count;
    Assert(slots_to_allocate > initial_count);
    if (slots_to_allocate <= initial_count) {
        return;
    }
    // When we resize we must reinsert all the values, so take a temporary
    // (Temp-backed) copy of the original data — old_entries needs no free.
    ArrayView<Table_Entry<T, U>> old_entries = array_copy(temp(), to_view(*table->entries));
    s64 n = Next_Power_Of_Two(slots_to_allocate);
    array_resize(*table->entries, n, false);
    // #Fix: after the uninitialized resize every slot's hash is stale/garbage,
    // which corrupts the probe walk; mark all slots empty before re-adding
    // (mirrors table_init).
    for (s64 i = 0; i < n; i += 1) {
        (*table->entries)[i].hash = 0;
    }
    table->count = 0;
    table->slots_filled = 0;
    // Re-insert the live entries:
    for (s64 i = 0; i < old_entries.count; i += 1) {
        Table_Entry<T, U> entry = old_entries[i];
        // #Fix: valid hashes satisfy hash >= HASH_TABLE_FIRST_VALID_HASH
        // (table_add bumps smaller hashes up to it); the original `>` dropped
        // entries whose hash equals HASH_TABLE_FIRST_VALID_HASH exactly.
        if (entry.hash >= HASH_TABLE_FIRST_VALID_HASH) {
            table_add(table, entry.key, entry.value);
        }
    }
}
// Empties the table; optionally lets the backing array release excess pages.
template <typename T, typename U> void table_reset (ArenaTable<T, U>* table, bool keep_memory=true) {
    table->count = 0;
    table->slots_filled = 0;
    s64 slot_count = table->entries->count;
    for (s64 slot = 0; slot < slot_count; slot += 1) {
        (*table->entries)[slot].hash = 0; // mark empty
    }
    if (keep_memory == false) { array_reset(*table->entries); }
}
// Frees the backing arena (entries live inside it); in debug builds poisons
// the table struct so stale use is obvious.
template <typename T, typename U> void table_release (ArenaTable<T, U>* table) {
    arena_array_free(*table->entries);
#if BUILD_DEBUG
    poison_struct(table);
#endif
}
// True when `key` is present in the table.
template <typename T, typename U> bool table_contains (ArenaTable<T, U>* table, T key) {
    U* found = table_find_pointer(table, key);
    return found != nullptr;
}
// Copies the value for `key` into *value; returns false when absent.
template <typename T, typename U> bool table_find (ArenaTable<T, U>* table, T key, U* value) {
    U* found = table_find_pointer(table, key);
    if (!found) return false;
    (*value) = (*found);
    return true;
}
// Removes `key`, copying its value into *value; returns false when absent.
// The slot becomes a tombstone (slots_filled stays reserved until reuse).
template <typename T, typename U> bool table_remove (ArenaTable<T, U>* table, T key, U* value) {
    Assert(table_is_valid(table));
    // #Fix: the original returned nullptr from a bool function.
    if (!table_is_valid(table)) return false;
    // #Walk_Table
    u32 mask = (u32)(table->entries->allocated - 1);
    u32 hash = table->hash_function(&key, sizeof(T));
    if (hash < HASH_TABLE_FIRST_VALID_HASH) {
        hash += HASH_TABLE_FIRST_VALID_HASH;
    }
    u32 index = hash & mask;
    u32 probe_increment = 1;
    u32 table_while_loop = (*table->entries)[index].hash;
    while (table_while_loop) {
        Table_Entry<T, U>* entry = &(*table->entries)[index];
        if ((entry->hash == hash) && table->compare_function(&entry->key, &key)) {
            entry->hash = HASH_TABLE_REMOVED_HASH; // tombstone, keeps probes intact
            table->count -= 1;
            (*value) = entry->value;
            return true;
        }
        index = (index + probe_increment) & mask;
        probe_increment += 1;
        table_while_loop = (*table->entries)[index].hash;
    }
    return false;
}
// #TODO: we need a for expansion iterator?
// table_find_multiple (put results in Temp-backed Array<>, and return it as an ArrayView<T>) {
// Collects ALL values stored under `key` into a Temp-backed view.
// #Fix: this was declared as a second `table_remove` with identical parameters
// but a different return type, which cannot compile alongside the real
// table_remove; renamed to table_find_multiple (matching the comment above)
// and the unused out-parameter was dropped.
template <typename T, typename U> ArrayView<U> table_find_multiple (ArenaTable<T, U>* table, T key) {
    Array<U> results;
    results.allocator = temp();
    // #Walk_Table
    u32 mask = (u32)(table->entries->allocated - 1);
    u32 hash = table->hash_function(&key, sizeof(T));
    if (hash < HASH_TABLE_FIRST_VALID_HASH) {
        hash += HASH_TABLE_FIRST_VALID_HASH;
    }
    u32 index = hash & mask;
    u32 probe_increment = 1;
    u32 table_while_loop = (*table->entries)[index].hash;
    while (table_while_loop) {
        Table_Entry<T, U>* entry = &(*table->entries)[index];
        if (entry->hash == hash) {
            if (table->compare_function(&entry->key, &key)) {
                array_add(results, entry->value);
            } else {
                if (table->count_collisions) { table->find_collisions += 1; }
            }
        } else {
            if (table->count_collisions) { table->find_collisions += 1; }
        }
        index = (index + probe_increment) & mask;
        probe_increment += 1;
        table_while_loop = (*table->entries)[index].hash;
    }
    return to_view(results);
}

View File

@ -0,0 +1,83 @@
#if OS_WINDOWS
// This file is included in Arena.cpp, so we don't need to include headers here.
// Reserves (but does not commit) `new_reserve` bytes of address space for the
// arena; on failure, memory_base is left untouched.
void platform_init (Arena* arena, s64 new_reserve) {
    s64 page_aligned_reserve_size = Align_To_Page_Size(new_reserve);
    void* address_start =
        VirtualAlloc(nullptr, (u64)page_aligned_reserve_size, MEM_RESERVE, PAGE_READWRITE);
    // printf("Reserving range %p:%p (size: %llu)\n", address_start, (u8*)address_start + page_aligned_reserve_size, page_aligned_reserve_size);
    if (address_start == nullptr) {
        s32 error_code = GetLastError();
        printf("In Arena:platform_init, VirtualAlloc failed with code %d\n", error_code);
        return;
    }
    arena->memory_base = (u8*)address_start;
}
// Commits pages so every address up to `end` is usable; advances
// first_uncommitted_page by the page-aligned delta.
void extend_committed_pages (Arena* arena, u8* end) {
    s64 delta = end - arena->first_uncommitted_page;
    Assert(delta >= 0);
    s64 size = Align_To_Page_Size(delta);
    // NOTE(review): VirtualAlloc's return value is not checked here — commit
    // can fail under memory pressure; consider handling.
    VirtualAlloc(arena->first_uncommitted_page, (u64)size, MEM_COMMIT, PAGE_READWRITE);
    // printf("Committing range %p:%p (size: %llu)\n", arena->first_uncommitted_page, (u8*)arena->first_uncommitted_page + size, size);
    // arena_lock_pages(arena, arena->first_uncommitted_page, size);
    arena->first_uncommitted_page += size;
}
// Decommits every committed page past the first `pages_to_keep` pages.
void free_pages_down_to (Arena* arena, s64 pages_to_keep) {
    if (arena == nullptr) return;
    Assert(pages_to_keep >= 1); // Always keep one page because we bootstrap a lot.
    s64 bytes_to_keep = pages_to_keep * PLATFORM_MEMORY_PAGE_SIZE;
    if (bytes_to_keep > reserve_size(arena)) {
        Assert(false); // Break in debug builds, but release we just do nothing.
        return; // just do nothing here. Maybe we should assert?
    }
    u64 bytes_committed = (u64)(arena->first_uncommitted_page - arena->memory_base);
    if (bytes_to_keep > (s64)bytes_committed) {
        return; // nothing to free
    }
    u8* start_address = arena->memory_base + bytes_to_keep;
    u64 bytes_to_decommit = (u64)(arena->first_uncommitted_page - start_address);
    // arena_unlock_pages(arena, start_address, (s64)bytes_to_decommit);
    if (bytes_to_decommit) {
        // MEM_DECOMMIT releases physical storage but keeps the reservation.
        VirtualFree(start_address, bytes_to_decommit, MEM_DECOMMIT);
        // printf("Freeing pages %p:%p (size: %llu)\n", start_address, (u8*)start_address + bytes_to_decommit, bytes_to_decommit);
    }
    arena->first_uncommitted_page = start_address;
}
// Releases the arena's entire reservation. For bootstrapped arenas the Arena
// struct itself lives inside the freed range, so it must not be written after
// the release (hence the flag check below).
void arena_delete (Arena* arena) {
    if (!is_valid(arena)) return;
#if BUILD_DEBUG
    { //default_allocator_free(arena->file_path.data);
      //default_allocator_free(arena->function_name.data);
        string_free_untracked(arena->label);
        remove_arena_from_in_use_list(arena);
    }
#endif
    // s64 size_tmp = reserve_size(arena);
    // printf("Releasing range %p:%p (size: %llu)\n", arena->memory_base, (u8*)arena->memory_base + size_tmp, size_tmp);
    bool arena_was_boostrapped = (arena->flags & Arena_Flags::Is_Bootstrapped) == Arena_Flags::Is_Bootstrapped;
    VirtualFree(arena->memory_base, 0, MEM_RELEASE);
    // Only safe to null the field when the struct is NOT inside the freed range.
    if (!arena_was_boostrapped) {
        arena->memory_base = nullptr;
    }
}
#endif

411
lib/Base/Array.h Normal file
View File

@ -0,0 +1,411 @@
// Strongly influenced by Array.jai in Basic module.
#pragma once
// For Arena-Backed arrays use ArenaArray
MSVC_RUNTIME_CHECKS_OFF
template <typename T>
struct Array { // #downcasts to an ArrayView.
    using ValueType = T;
    // First two fields mirror ArrayView<T>'s (count, data) layout.
    s64 count;
    T* data;
    s64 allocated;        // item capacity
    Allocator allocator;  // owns `data`; nullptr proc means "not yet resizable"
    Array() {
        memset(this, 0, sizeof(*this));
    }
    // Reserve and optionally resize in one step.
    Array (Allocator _allocator, s64 reserve_count=0, s64 resize_count=0, bool initialize=false) {
        memset(this, 0, sizeof(*this));
        allocator = _allocator;
        array_reserve(*this, reserve_count);
        array_resize(*this, resize_count, initialize);
    }
    // Adopt existing storage; allocator defaults to the context allocator.
    Array(s64 new_count, void* new_data, s64 _allocated) {
        count = new_count;
        data = (T*)new_data;
        allocated = _allocated;
        allocator = context_allocator();
    }
    // Adopt existing storage with an explicit allocator.
    Array(Allocator _allocator, s64 new_count, void* new_data, s64 _allocated) {
        count = new_count;
        data = (T*)new_data;
        allocated = _allocated;
        allocator = _allocator;
    }
    T& operator[](s64 index) {
#if ARRAY_ENABLE_BOUNDS_CHECKING
        if (index < 0 || index >= count) { debug_break(); } // index out of bounds
#endif
        return static_cast<T*>(data)[index];
    }
};
// If we have a valid allocator, we assume this is resizeable.
template <typename T> bool is_resizable (Array<T>& src) {
    return nullptr != src.allocator.proc;
}
// An empty array is always valid; otherwise data must exist and capacity must
// cover count.
template <typename T>
bool is_valid (Array<T> src) {
    if (src.count < 0) return false;
    if (src.count == 0) return true;
    return (src.data != nullptr) && (src.allocated >= src.count);
}
// Zeroes the in-use region only (count items, not the full capacity).
template <typename T>
void array_zero (const Array<T>& src) {
    memset(src.data, 0, sizeof(T) * src.count);
}
// Here, the copied array uses the same allocator as src. I'm not sure if this is a good idea.
// Applies to `array_copy_zero` and `array_copy`
// Here, the copied array uses the same allocator as src. I'm not sure if this is a good idea.
// Applies to `array_copy_zero` and `array_copy`
// NOTE(review): NewArray presumably allocates from the context allocator, not
// src.allocator — confirm the resulting array can be freed through src.allocator.
template <typename T>
Array<T> array_copy (const Array<T>& src) {
    if (!src.data || src.count == 0) {
        return Array<T>(src.allocator);
    }
    T* new_data = NewArray<T>(src.count, false);
    memcpy(new_data, src.data, src.count * sizeof(T));
    // #Fix: new_data holds exactly src.count items, but the original claimed
    // src.allocated as capacity — a later array_add would write out of bounds.
    return Array<T>(src.allocator, src.count, new_data, src.count);
}
// Like array_copy, but the new storage is zero-filled instead of copied.
template <typename T>
Array<T> array_copy_zero (const Array<T>& src) {
    if (!src.data || src.count == 0) {
        return Array<T>(src.allocator);
    }
    T* new_data = NewArray<T>(src.count, false);
    memset(new_data, 0, src.count * sizeof(T));
    // #Fix: capacity is src.count (what was allocated), not src.allocated.
    return Array<T>(src.allocator, src.count, new_data, src.count);
}
// Empties the array but keeps the allocation (capacity unchanged).
template <typename T>
void array_reset_keeping_memory (Array<T>& src) {
    src.count = 0;
}
// Frees the array's storage through its allocator (falling back to
// internal_free when no allocator was recorded) and zeroes the struct fields.
template <typename T>
void array_free (Array<T>& src) {
    if (!src.data) return;
    if (src.allocated == 0) return;
    if (src.allocator.proc != nullptr) {
        src.allocator.proc(Allocator_Mode::DEALLOCATE, 0, 0, src.data, src.allocator.data);
    } else {
        internal_free(src.data);
    }
    src.count = 0;
    src.data = nullptr;
    src.allocated = 0;
}
// Default-constructs elements in [start, end).
// Really this can be one init followed by a bunch of memcpy; for long arrays
// we could power-of-two double the copy out, etc.
template <typename T>
void array_initialize (Array<T>& src, s64 start, s64 end) {
    for (s64 index = start; index < end; index += 1) {
        src[index] = T();
    }
}
// Ensures capacity for `desired_items`; adopts the context allocator on first
// growth if none was set. No-op when capacity already suffices.
template <typename T>
void array_reserve (Array<T>& src, s64 desired_items) {
    if (desired_items <= src.allocated) return;
    if (src.allocator.proc == nullptr) {
        src.allocator = context_allocator();
    }
    Assert(src.allocator.proc != nullptr);
    src.data = (T*)src.allocator.proc(Allocator_Mode::RESIZE, desired_items * sizeof(T), src.allocated * sizeof(T), src.data, src.allocator.data);
    Assert(src.data != nullptr);
    src.allocated = desired_items;
}
// Changes count to new_count, growing capacity as needed; optionally
// default-constructs the newly exposed range.
template <typename T>
void array_resize (Array<T>& src, s64 new_count, bool initialize=true) {
    if (new_count == src.count) return;
    s64 previous_count = src.count;
    array_reserve(src, new_count);
    src.count = new_count;
    if (!initialize) return;
    array_initialize(src, previous_count, new_count);
}
// Doubles capacity (minimum 8) when the array is full.
template <typename T>
force_inline void array_maybe_grow (Array<T>& src) {
    if (src.count < src.allocated) return;
    s64 doubled = src.count * 2;
    array_reserve(src, doubled > 8 ? doubled : 8);
}
// Removes and returns the last item (bounds-checked via operator[] in debug).
template <typename T>
T pop (Array<T>& src) {
    s64 last = src.count - 1;
    T value = src[last];
    src.count = last;
    return value;
}
// Appends a default-constructed item and returns a pointer to it.
template <typename T>
T* array_add (Array<T>& src) {
    array_maybe_grow(src);
    T* slot = &src.data[src.count];
    *slot = T(); // initialize
    src.count += 1;
    return slot;
}
// Appends one item, growing if needed.
template <typename T>
void array_add (Array<T>& src, T new_item) {
    array_maybe_grow(src);
    src.data[src.count++] = new_item;
}
// Linear search; returns the first matching index, or -1 when absent.
template <typename T>
s64 array_find (Array<T>& src, T item) {
    s64 index = 0;
    while (index < src.count) {
        if (src[index] == item) return index;
        index += 1;
    }
    return -1;
}
// Appends new_item only if it is not already present; returns true on add.
template <typename T>
bool array_add_if_unique (Array<T>& src, T new_item) {
    bool already_present = (array_find(src, new_item) != -1);
    if (already_present) return false;
    array_add(src, new_item);
    return true;
}
// Removes the item at `index`, shifting the tail left to preserve order.
template <typename T>
void array_ordered_remove_by_index (Array<T>& src, s64 index) {
    Assert(index >= 0); Assert(index < src.count);
    for (s64 j = index + 1; j < src.count; j += 1) {
        src[j - 1] = src[j];
    }
    src.count -= 1;
}
// Removes the first occurrence of `item`, preserving order; no-op when absent.
template <typename T>
void array_ordered_remove_by_value (Array<T>& src, T item) {
    s64 found = array_find(src, item);
    if (found == -1) return;
    array_ordered_remove_by_index(src, found);
}
// Removes the item at `index` by moving the last item into its place (O(1),
// does not preserve order).
template <typename T>
void array_unordered_remove_by_index (Array<T>& src, s64 index) {
    Assert(index >= 0); Assert(index < src.count);
    s64 last_index = src.count - 1;
    if (index != last_index) { // Copy back item:
        memcpy(&src[index], &src[last_index], sizeof(T));
    }
    src.count = last_index;
}
// Removes up to max_count_to_remove occurrences of `item` (unordered);
// returns how many were removed.
template <typename T>
s64 array_unordered_remove_by_value (Array<T>& src, T item, s64 max_count_to_remove) {
    s64 removed = 0;
    s64 i = 0;
    while (i < src.count) {
        if (!(src[i] == item)) {
            i += 1;
            continue;
        }
        array_unordered_remove_by_index(src, i);
        removed += 1;
        if (removed == max_count_to_remove) break;
        // do not advance i: the swapped-in element must be checked too
    }
    return removed;
}
// Non-owning (count, data) pair; the common currency between array types.
template <typename T>
struct ArrayView {
    using ValueType = T;
    s64 count;
    T* data;
    ArrayView(Array<T> array) { // auto-#downcast from Array<T>
        count = array.count;
        data = array.data;
    }
    // Unfortunately we need ArenaArray to be declared ahead. God, I hate C++.
    // We have to use to_view(), and cannot auto-#downcast.
    // ArrayView(ArenaArray<T> array) { // auto-#downcast from ArenaArray<T>
    //     count = array.count;
    //     data = array.data;
    // }
    ArrayView() { count = 0; data = nullptr; }
    // Allocating constructors — despite being a "view", these own nothing;
    // pair with array_free(ArrayView&) which trusts the context allocator.
    ArrayView(s64 new_count, bool initialize=true) {
        count = new_count;
        data = NewArray<T>(new_count, initialize);
    }
    ArrayView(Allocator allocator, s64 new_count, bool initialize=true) {
        count = new_count;
        data = NewArray<T>(allocator, new_count, initialize);
    }
    // #Note: use array_view to create slices or to downcast to ArrayView!
    ArrayView(s64 _count, T* _data) {
        count = _count;
        data = _data;
    }
    T& operator[](s64 index) {
#if ARRAY_ENABLE_BOUNDS_CHECKING
        if (index < 0 || index >= count) { debug_break(); } // index out of bounds
#endif
        return static_cast<T*>(data)[index];
    }
};
// Reinterprets the view's storage as raw bytes (count becomes a byte count).
template <typename T> ArrayView<u8> to_byte_view (ArrayView<T> src) {
    // #Fix: the original brace-init passed a T* where ArrayView<u8>'s
    // constructor needs a u8*, and count * sizeof(T) (unsigned) narrowed to
    // s64 inside a braced list — both are compile errors. Cast explicitly.
    ArrayView<u8> byte_view((s64)(src.count * sizeof(T)), (u8*)src.data);
    return byte_view;
}
// True when the view holds no items.
template <typename T>
bool is_empty (ArrayView<T> src) {
    return src.count == 0;
}
// #NOTE: procedures should be robust to arrays with count of zero!
// Whether or not this is an error is procedure specific, but for most
// things, there is a default behavior that is expected.
// An empty view is valid; otherwise data must be non-null.
template <typename T>
bool is_valid (ArrayView<T> src) {
    if (src.count < 0) return false;
    if (src.count == 0) return true;
    return src.data != nullptr;
}
// can also use ArrayView<T>(count, data) for initialization!
// can also use ArrayView<T>(count, data) for initialization!
template <typename T>
ArrayView<T> to_view (Array<T> array) {
    return ArrayView<T>(array.count, array.data);
}
// Slice of an existing view; asserts the slice stays inside the source.
template <typename T>
ArrayView<T> array_view (ArrayView<T> array, s64 start_index, s64 view_count) {
    Assert(start_index + view_count <= array.count);
    return ArrayView<T>(view_count, &array[start_index]);
}
// Slice of an Array; asserts the slice stays inside the source.
template <typename T>
ArrayView<T> array_view (Array<T> array, s64 start_index, s64 view_count) {
    Assert(start_index + view_count <= array.count);
    return ArrayView<T>(view_count, &array[start_index]);
}
// Zeroes every item the view covers.
template <typename T>
void array_zero (const ArrayView<T>& src) {
    memset(src.data, 0, sizeof(T) * src.count);
}
// Sets count to zero; the underlying storage (if any) is untouched.
template <typename T>
void array_reset_keeping_memory (ArrayView<T>& src) {
    src.count = 0;
}
// Deep copy of a view; the new storage comes from the context allocator.
template <typename T>
ArrayView<T> array_copy (const ArrayView<T>& src) {
    if (src.count == 0 || src.data == nullptr) {
        return ArrayView<T>(); // Return an empty array
    }
    T* duplicate = NewArray<T>(src.count);
    memcpy(duplicate, src.data, src.count * sizeof(T));
    return ArrayView<T>(src.count, duplicate);
}
// Deep copy using an explicit allocator.
// NOTE(review): assumes push_allocator scopes the context allocator for the
// rest of this function body (so the inner array_copy allocates from it) —
// confirm the macro's scoping behavior.
template <typename T>
ArrayView<T> array_copy (Allocator allocator, const ArrayView<T>& src) {
    push_allocator(allocator);
    return array_copy(src);
}
// Frees a view's storage through the context allocator.
template <typename T>
void array_free (ArrayView<T>& src) {
    if (!src.data || src.count == 0) { return; }
    // Use with caution!
    internal_free(src.data); // we just have to trust that the context.allocator is correct for this guy!
    src.count = 0;
    src.data = nullptr;
}
// Usage: `auto array = array_from_values<s32>(6,7,8,9,10,51);`
template <typename T, typename... ArgValues>
Array<T> array_from_values (ArgValues... args) {
constexpr s64 N = sizeof...(ArgValues);
auto array = Array<T>(N, /*initialize:*/false);
T values[] = {args...};
for (s64 i = 0; i < N; i += 1) {
array[i] = values[i];
}
return array;
}
// Usage `auto view = array_view_from_values<s32>(1,2,3,4,5);`
// Builds a view over freshly allocated storage holding the argument values.
template <typename T, typename... ArgValues>
ArrayView<T> array_view_from_values (ArgValues... args) {
    constexpr s64 N = sizeof...(ArgValues);
    // NOTE(review): presumably an allocating (count, initialize) constructor;
    // confirm `false` is not silently converting toward the (count, data)
    // overload mentioned at the top of this section.
    auto array = ArrayView<T>(N, /*initialize:*/false);
    T values[] = {args...};
    for (s64 i = 0; i < N; i += 1) {
        array[i] = values[i];
    }
    return array;
}
MSVC_RUNTIME_CHECKS_RESTORE

519
lib/Base/Base.h Normal file
View File

@ -0,0 +1,519 @@
#pragma once
// Some of these macros are ""borrowed"" from nick aversano | source: https://github.com/nickav/na/blob/main/na.h
// #OS_Platform
#if defined(_WIN32)
#define OS_WINDOWS 1
#elif defined(__APPLE__)
#define OS_MACOS 1
#elif defined(__linux__)
#define OS_LINUX 1
#endif
#if !defined(OS_WINDOWS)
#define OS_WINDOWS 0
#endif
#if !defined(OS_LINUX)
#define OS_LINUX 0
#endif
#if !defined(OS_MACOS)
#define OS_MACOS 0
#endif
#if defined(__cplusplus)
#define LANG_CPP 1
#else
#define LANG_C 1
#endif
// #Compiler: Language
#if !defined(LANG_CPP)
#define LANG_CPP 0
#endif
#if !defined(LANG_C)
#define LANG_C 0
#endif
// #Compiler: Vendor
#if defined(__clang__)
#define COMPILER_CLANG 1
#elif defined(_MSC_VER)
#define COMPILER_MSVC 1
#elif defined(__GNUC__) || defined(__GNUG__)
#define COMPILER_GCC 1
#endif
#if !defined(COMPILER_MSVC)
#define COMPILER_MSVC 0
#endif
#if !defined(COMPILER_GCC)
#define COMPILER_GCC 0
#endif
#if !defined(COMPILER_CLANG)
#define COMPILER_CLANG 0
#endif
#if COMPILER_MSVC
#if _MSC_VER >= 1930
#define COMPILER_MSVC_YEAR 2022
#elif _MSC_VER >= 1920
#define COMPILER_MSVC_YEAR 2019
#elif _MSC_VER >= 1910
#define COMPILER_MSVC_YEAR 2017
#elif _MSC_VER >= 1900
#define COMPILER_MSVC_YEAR 2015
#elif _MSC_VER >= 1800
#define COMPILER_MSVC_YEAR 2013
#elif _MSC_VER >= 1700
#define COMPILER_MSVC_YEAR 2012
#elif _MSC_VER >= 1600
#define COMPILER_MSVC_YEAR 2010
#elif _MSC_VER >= 1500
#define COMPILER_MSVC_YEAR 2008
#elif _MSC_VER >= 1400
#define COMPILER_MSVC_YEAR 2005
#else
#define COMPILER_MSVC_YEAR 0
#endif
#endif
// #Architecture: CPU Vendor
#if defined(_WIN32)
#if defined(_M_AMD64)
#define ARCH_CPU_X64 1
#elif defined(_M_IX86)
#define ARCH_CPU_X86 1
#elif defined(_M_ARM64)
#define ARCH_CPU_ARM64 1
#elif defined(_M_ARM)
#define ARCH_CPU_ARM32 1
#endif
#else
#if defined(__amd64__) || defined(__amd64) || defined(__x86_64__) || defined(__x86_64)
#define ARCH_CPU_X64 1
#elif defined(i386) || defined(__i386) || defined(__i386__)
#define ARCH_CPU_X86 1
#elif defined(__aarch64__)
#define ARCH_CPU_ARM64 1
#elif defined(__arm__)
#define ARCH_CPU_ARM32 1
#endif
#endif
#if !defined(ARCH_CPU_X64)
#define ARCH_CPU_X64 0
#endif
#if !defined(ARCH_CPU_X86)
#define ARCH_CPU_X86 0
#endif
#if !defined(ARCH_CPU_ARM64)
#define ARCH_CPU_ARM64 0
#endif
#if !defined(ARCH_CPU_ARM32)
#define ARCH_CPU_ARM32 0
#endif
// #Architecture: Register Width
// NOTE: every ARCH_CPU_* macro is unconditionally defined above (as 0 or 1),
// so `defined(ARCH_CPU_X64)` is ALWAYS true and the old check made
// ARCH_64BIT == 1 even on 32-bit targets. Test the macro values instead.
#if ARCH_CPU_X64 || ARCH_CPU_ARM64
#define ARCH_64BIT 1
#elif ARCH_CPU_X86 || ARCH_CPU_ARM32
#define ARCH_32BIT 1
#endif
#if !defined(ARCH_64BIT)
#define ARCH_64BIT 0
#endif
#if !defined(ARCH_32BIT)
#define ARCH_32BIT 0
#endif
// #Architecture: Endianness
static const int __arch_endian_check_num = 1;
#define ARCH_LITTLE_ENDIAN (*(char *)&__arch_endian_check_num == 1)
#define ARCH_BIG_ENDIAN (!ARCH_LITTLE_ENDIAN)
#if defined(_MSC_VER)
#ifdef _DEBUG
#define BUILD_DEBUG 1
#else
#define BUILD_DEBUG 0
#endif
#elif defined(__GNUC__) || defined(__clang__)
#ifndef NDEBUG
#define BUILD_DEBUG 1
#else
#define BUILD_DEBUG 0
#endif
#endif
#define BUILD_CONSOLE_INTERFACE BUILD_DEBUG
#include <stdio.h> // vsnprintf
#include <cstdarg> // va_list, ...
#if ARCH_CPU_X64
#include "CPU_X64.cpp"
#define PLATFORM_MEMORY_PAGE_SIZE 4096
#define PLATFORM_MEMORY_LARGE_PAGE_SIZE 2097152
#define CPU_REGISTER_WIDTH_BYTES 8
#define CPU_CACHE_LINE_SIZE 64
#else
#error "CPU not supported (yet)!"
#endif
#if OS_WINDOWS
#define WIN32_LEAN_AND_MEAN
#include <Windows.h>
#undef ERROR // why...
#undef NO_ERROR // ugh...
#include <winioctl.h>
#else
#error "This configuration is NOT supported. Only Windows with MSVC is currently supported."
#endif
#ifndef PROTOTYPING_API
#ifdef OS_WINDOWS
#define PROTOTYPING_API extern "C" __declspec(dllexport)
#else
#define PROTOTYPING_API
#endif
#endif // #ifndef PROTOTYPING_API
#define C_API
#define TEMPORARY_API
#define DEPRECATED_API
#include <string.h>
#include <stdint.h>
#include <stdlib.h>
#include <limits> // std::numeric_limits — used for the FLOAT32_*/FLOAT64_* constants below
// Primitive types.
// typedef nullptr null
typedef uint8_t u8;
typedef uint16_t u16;
typedef uint32_t u32;
typedef uint64_t u64;
typedef int8_t s8;
typedef int16_t s16;
typedef int32_t s32;
typedef int64_t s64;
// typedef bool b8; // just use bool for b8s
typedef s16 b16;
typedef s32 b32;
typedef s64 b64;
typedef float f32;
typedef double f64;
// Units
#define KB(n) (((s64)(n)) << 10)
#define MB(n) (((s64)(n)) << 20)
#define GB(n) (((s64)(n)) << 30)
#define TB(n) (((s64)(n)) << 40)
#define Thousand(n) ((n)*1000)
#define Million(n) ((n)*1000000)
#define Billion(n) ((n)*1000000000)
#define internal static
#define global static
#define local_persist static // I don't like these, so I generally won't use them!
#if COMPILER_MSVC
# define thread_static __declspec(thread)
#elif COMPILER_CLANG || COMPILER_GCC
# define thread_static __thread
#else
# error thread_static not defined for this compiler.
#endif
#if COMPILER_MSVC || (COMPILER_CLANG && OS_WINDOWS)
# pragma section(".rdata$", read)
# define read_only __declspec(allocate(".rdata$"))
#elif (COMPILER_CLANG && OS_LINUX)
# define read_only __attribute__((section(".rodata")))
#else
#endif
#if COMPILER_MSVC
# define force_inline __forceinline
#elif COMPILER_CLANG || COMPILER_GCC
# define force_inline __attribute__((always_inline))
#else
# error force_inline not defined for this compiler.
#endif
// Maybe move to a different file.
// Rounds n up to the next multiple of PLATFORM_MEMORY_PAGE_SIZE
// (page size is a power of two, so the standard mask trick applies).
force_inline s64 Align_To_Page_Size(s64 n) {
    constexpr s64 page_mask = PLATFORM_MEMORY_PAGE_SIZE - 1;
    return (n + page_mask) & ~page_mask;
}
// Rounds `value` up to the next multiple of `alignment`.
// `alignment` must be a power of two (mask trick). The value round-trips
// through s64, so T must be an integral or pointer-sized type.
template <typename T>
force_inline T Align (T value, s64 alignment) {
    s64 intermediate = (((s64)value) + alignment - 1) & (~(alignment - 1));
    return (T)intermediate;
}
/*
force_inline s64 Align_Forwards(s64 size, s64 alignment) {
return (((size + alignment - 1) / alignment) * alignment);
}
*/
// Branchless nextpow2 implementation. Returns zero if v is negative.
// All it does is fill in all the bits to the right of the most significant bit.
// Notes: Next_Power_Of_Two(0) == 0; an exact power of two is returned
// unchanged (the initial v -= 1 handles that case); valid for v <= 2^62.
force_inline s64 Next_Power_Of_Two(s64 v) {
    v -= 1;
    v |= v >> 1;
    v |= v >> 2;
    v |= v >> 4;
    v |= v >> 8;
    v |= v >> 16;
    v |= v >> 32;
    v += 1;
    return v;
}
// Number of decimal digits in the magnitude of v_in (sign is ignored).
// decimal_digits(0) == 1, decimal_digits(-1234) == 4.
s32 decimal_digits (s32 v_in) {
    // Compute the magnitude in unsigned space: the old `v = -v` was UB
    // (signed overflow) for v_in == S32_MIN; 0u - (u32)v_in is well-defined.
    u32 v = (v_in < 0) ? (0u - (u32)v_in) : (u32)v_in;
    if (v < 10) return 1;
    if (v < 100) return 2;
    if (v < 1000) return 3;
    if (v < 10000) return 4;
    if (v < 100000) return 5;
    if (v < 1000000) return 6;
    if (v < 10000000) return 7;
    if (v < 100000000) return 8;
    if (v < 1000000000) return 9;
    return 10; // S32_MIN's magnitude 2,147,483,648 has 10 digits
}
#define Stringify_(S) #S
#define Stringify(S) Stringify_(S)
#define Concat_(A,B) A##B
#define Concat(A,B) Concat_(A,B)
#if COMPILER_MSVC
# define debug_break() __debugbreak()
#elif COMPILER_CLANG || COMPILER_GCC
# define debug_break() __builtin_trap()
#else
# define debug_break()
# error Unknown trap intrinsic for this compiler.
#endif
#define AssertAlways(x) do{if(!(x)) {debug_break();}}while(0)
#if BUILD_DEBUG
# define Assert(x) AssertAlways(x)
#else
# define Assert(x) (void)(x)
#endif
#if LANG_CPP
# define C_LINKAGE_BEGIN extern "C"{
# define C_LINKAGE_END }
# define C_LINKAGE extern "C"
#else
# define C_LINKAGE_BEGIN
# define C_LINKAGE_END
# define C_LINKAGE
#endif
// Disable some of MSVC most aggressive Debug runtime checks in function header/footer (used in some simple/low-level functions)
#if COMPILER_MSVC
#define MSVC_RUNTIME_CHECKS_OFF __pragma(runtime_checks("",off)) __pragma(check_stack(off)) __pragma(strict_gs_check(push,off))
#define MSVC_RUNTIME_CHECKS_RESTORE __pragma(runtime_checks("",restore)) __pragma(check_stack()) __pragma(strict_gs_check(pop))
#else
#define MSVC_RUNTIME_CHECKS_OFF
#define MSVC_RUNTIME_CHECKS_RESTORE
#endif
// Scoped Macros/Functions for auto_reset and auto_release
// usage `Auto_Reset guard(arena);` within a scope.
#define auto_reset(x) \
Auto_Reset Concat(_auto_reset_guard_, __LINE__)(x)
#define push_allocator(x) \
Push_Allocator Concat(_push_alloc_guard_, __LINE__)(x)
#define push_alignment(x, y) \
Push_Alignment Concat(_push_align_guard_, __LINE__)(x, y)
#define push_arena(x) \
Push_Arena Concat(_push_arena_guard_, __LINE__)(x)
#define push_expandable_arena(x) \
Push_Expandable_Arena Concat(_push_ex_arena_guard_, __LINE__)(x)
#define auto_release(x) \
Auto_Release Concat(_auto_release_guard_, __LINE__)(x)
#define auto_release_temp() \
auto_release(thread_context()->temp)
#define push_allocator_label(x) \
Push_Allocator_Label Concat(_push_allocator_label_guard_, __LINE__)(x)
#define thread_context() thread_local_context
#define temp() allocator(thread_context()->temp)
#define context_allocator() thread_context()->allocator
#define context_logger() &thread_context()->logger
#define context_builder() thread_context()->string_builder
// Numeric constants. The previous float/double limits cast raw IEEE-754 bit
// patterns to floating point — e.g. (f32)(0x7F7FFFFF) converts the *integer
// value* (~2.1e9) to float instead of reinterpreting the bits, so
// FLOAT32_MAX was wildly wrong. They are now defined via <limits>.
constexpr f32 TAU = 6.28318530717958648f;   // rounds to the nearest f32 of 2*pi
constexpr f64 TAU_64 = 6.28318530717958648;
constexpr f32 PI = 3.1415927f;
constexpr f64 PI_64 = 3.141592653589793;
constexpr f32 FLOAT16_MAX = 65504.0f; // largest finite IEEE-754 half
constexpr f32 FLOAT32_MIN = std::numeric_limits<f32>::min(); // smallest normalized (bits 0x00800000)
constexpr f32 FLOAT32_MAX = std::numeric_limits<f32>::max(); // bits 0x7F7FFFFF
constexpr f32 FLOAT32_INFINITY = std::numeric_limits<f32>::infinity();
constexpr f32 FLOAT32_NAN = std::numeric_limits<f32>::quiet_NaN();
constexpr f64 FLOAT64_MIN = std::numeric_limits<f64>::min(); // smallest normalized (bits 0x0010000000000000)
constexpr f64 FLOAT64_MAX = std::numeric_limits<f64>::max(); // bits 0x7FEFFFFFFFFFFFFF
constexpr f64 FLOAT64_INFINITY = std::numeric_limits<f64>::infinity();
constexpr f64 FLOAT64_NAN = std::numeric_limits<f64>::quiet_NaN();
constexpr s8 S8_MIN = -128;
constexpr s8 S8_MAX = 127;
constexpr u8 U8_MAX = 255;
constexpr s16 S16_MIN = -32768;
constexpr s16 S16_MAX = 32767;
constexpr u16 U16_MAX = 0xffff;
constexpr s32 S32_MIN = (-2147483647 - 1); // 0x80000000 is unsigned; its conversion to s32 is implementation-defined pre-C++20
constexpr s32 S32_MAX = 0x7fffffff;
constexpr u32 U32_MAX = 0xffffffff;
constexpr s64 S64_MIN = (-9223372036854775807LL - 1); // 0x8000000000000000LL doesn't fit in s64 and is actually unsigned
constexpr s64 S64_MAX = 0x7fffffffffffffffLL;
constexpr u64 U64_MAX = 0xffffffffffffffffULL;
// Small POD vector types. The anonymous-struct-in-union layout allows both
// named access (v.x) and indexed access (v.data[0]) over the same storage.
// (Anonymous structs are a widely supported extension, not standard C++.)
struct Vec2 {
    union {
        struct { float x, y; };
        float data[2];
    };
};
struct Vec3 {
    union {
        struct { float x, y, z; };
        float data[3];
    };
};
struct Vec4 {
    union {
        struct { float x, y, z, w; };
        float data[4];
    };
};
// Bit-level reinterpretation of any same-sized type as a VecN, done via
// memcpy (the well-defined way to type-pun). The static_assert rejects
// size mismatches at compile time.
template <typename T> force_inline Vec2 to_vec2 (T any) {
    static_assert(sizeof(T) == sizeof(Vec2));
    Vec2 v;
    memcpy(&v, &any, sizeof(Vec2));
    return v;
}
template <typename T> force_inline Vec3 to_vec3 (T any) {
    static_assert(sizeof(T) == sizeof(Vec3));
    Vec3 v;
    memcpy(&v, &any, sizeof(Vec3));
    return v;
}
template <typename T> force_inline Vec4 to_vec4 (T any) {
    static_assert(sizeof(T) == sizeof(Vec4));
    Vec4 v;
    memcpy(&v, &any, sizeof(Vec4));
    return v;
}
// Clamps v into [min, max]. T must be a scalar type.
// The checks are applied in order (min first, then max), matching the
// original behavior: if min > max, max wins.
template <typename T> T clamp (T v, T min, T max) {
    T result = (v < min) ? min : v;
    result = (result > max) ? max : result;
    return result;
}
// #thread_primitives #move?
#if OS_WINDOWS
struct Condition_Variable {
CONDITION_VARIABLE condition_variable;
};
struct Semaphore {
HANDLE event;
};
struct Mutex {
CRITICAL_SECTION csection;
};
struct OS_Thread {
HANDLE windows_thread;
s32 windows_thread_id;
};
struct File {
HANDLE handle;
};
internal void mutex_init (Mutex* mutex);
internal void mutex_destroy (Mutex* mutex);
internal void lock (Mutex* mutex);
internal void unlock (Mutex* mutex);
#endif
// Parenthesized so expressions like `#if POSIX_THREADS && x` expand correctly
// (the old form expanded to `OS_LINUX || ... || OS_ANDROID && x`).
// NOTE(review): OS_IOS / OS_ANDROID are never defined above; in #if they
// evaluate to 0 — confirm that is intentional.
#define POSIX_THREADS (OS_LINUX || OS_MACOS || OS_IOS || OS_ANDROID)
#if OS_MACOS
struct Semaphore {
task_t owner;
semaphore_t event = 0;
};
#endif
#if OS_LINUX || OS_ANDROID
struct Semaphore {
sem_t semaphore;
};
#endif
#if OS_IS_UNIX // #posix threads
struct OS_Thread {
pthread_t thread_handle;
Semaphore is_alive;
Semaphore suspended;
b32 is_done;
};
#endif
// Scoped locking: `lock_guard(&mutex);` holds the mutex until the end of the
// enclosing scope (guard variable named uniquely per line via __LINE__).
#define lock_guard(x) \
Mutex_Lock_Guard Concat(_auto_lock_guard_, __LINE__)(x)
// RAII guard: locks in the constructor, unlocks in the destructor.
struct Mutex_Lock_Guard {
    Mutex* mutex;
    Mutex_Lock_Guard (Mutex* mutex) {
        this->mutex = mutex;
        lock(mutex);
    }
    ~Mutex_Lock_Guard () {
        unlock(mutex);
    }
};
// Helper macros for raw arrays:
// Number of elements in a C array. Fully parenthesized: without the outer
// parens, `x / ArrayCount(a)` parsed as `(x / sizeof(a)) / sizeof(a[0])`.
#define ArrayCount(array) (sizeof(array) / sizeof((array)[0]))
// For-loop construct macros
#define for_each_index(_idx_, _until_) for (s64 _idx_ = 0; _idx_ < _until_; ++_idx_)
// For-loops for ArrayView<> compatible types
#define for_each(_idx_, _array_) for (s64 _idx_ = 0; _idx_ < (_array_).count; ++_idx_)
#define for_each_reverse(_idx_, _array_) for (s64 _idx_ = (_array_).count-1; _idx_ >= 0; _idx_--)
// #define for_each_starting_at(_it_, _array_, _start_) for (s64 _it_ = _start_; _it_ < (_array_).count; _it_ += 1)

View File

@ -0,0 +1,100 @@
// See Context_Base in jai, and TCTX in raddebugger:
// Scope guard that swaps the thread context's allocator_label for the given
// label and restores the previous one (freeing the temporary copy) on exit.
struct Push_Allocator_Label {
    string old_label;
    Push_Allocator_Label(string new_label) {
        old_label = thread_context()->allocator_label;
        // NOTE(review): untracked copy — presumably so allocation tracking
        // doesn't record the label itself; confirm.
        thread_context()->allocator_label = copy_string_untracked(new_label);
    }
    ~Push_Allocator_Label() {
        string_free_untracked(thread_context()->allocator_label);
        thread_context()->allocator_label = old_label;
    }
};
// One-time setup of the main thread's Thread_Context: default allocator
// tracking, arena bookkeeping, the context arenas/string builders, and the
// default logger. Must run before any code touches thread_context().
internal void Bootstrap_Main_Thread_Context () {
    Timed_Block_Print_No_Context("Bootstrap_Main_Thread_Context");
    // 0. Setup general allocator
    default_allocator_initialize_tracking();
    // 1. Setup arena free list
    // #note: the arena free list is disabled because I'm not convinced it's a good idea.
    // It would allow us to cache arenas to load address space very quickly (much faster than calling VirtualAlloc), but
    // it adds complexity and makes it difficult to know when you're doing something stupid, because memory is still writeable
    // and readable after it's "freed" with `release_arena`. So for prototyping purposes, we just release the whole arena.
    // arena_free_list = (Arena_Free_List*)default_allocator_new(sizeof(Arena_Free_List), 64, true); // permanent allocation.
    // memset(arena_free_list, 0, sizeof(Arena_Free_List));
    // initialize_arena_free_list(default_allocator());
    // 1b. Setup arena in-use list:
    initialize_arenas_in_use_list();
    // 2. #NewContext Setup thread local context
    ExpandableArena* arena_ex = bootstrap_expandable_arena(Arena_Reserve::Size_64M, "Main Thread Arena");
    thread_local_context = New<Thread_Context>(allocator(arena_ex));
    push_allocator_label("Main Thread Initialization");
    thread_local_context->temp = bootstrap_expandable_arena(Arena_Reserve::Size_2M, "Main Thread Temp");
    thread_local_context->arena = arena_ex;
    thread_local_context->allocator = allocator(arena_ex);
    thread_local_context->thread_idx = 0;
    thread_local_context->thread_name = "Main Thread";
    // #TODO (Low priority) This is redundant! There should just be one string_builder!
    thread_local_context->log_builder = new_string_builder(Arena_Reserve::Size_64M, "Main Thread Log Builder");
    thread_local_context->string_builder = new_string_builder(Arena_Reserve::Size_2M, "Main Thread String Builder");
    thread_local_context->error_arena = bootstrap_arena(Arena_Reserve::Size_64M, "Main Thread Error Arena");
    default_logger_initialize();
    thread_local_context->logger = {default_logger_proc, &default_logger};
}
// Scope guard that installs the given arena as the thread context's
// allocator and restores the previous allocator on scope exit. Overloads
// accept expandable, plain, and fixed arenas. Unlike Push_Allocator, this
// asserts that a thread context exists.
struct Push_Arena {
    Thread_Context* context;
    Allocator original_allocator;
    Push_Arena(ExpandableArena* arena_ex) {
        Assert(is_valid(arena_ex));
        context = thread_context();
        Assert(context != nullptr);
        original_allocator = context->allocator;
        context->allocator = allocator(arena_ex);
    }
    Push_Arena(Arena* arena) {
        Assert(is_valid(arena));
        context = thread_context();
        Assert(context != nullptr);
        original_allocator = context->allocator;
        context->allocator = allocator(arena);
    }
    Push_Arena(FixedArena* arena) {
        Assert(is_valid(arena));
        context = thread_context();
        Assert(context != nullptr);
        original_allocator = context->allocator;
        context->allocator = allocator(arena);
    }
    ~Push_Arena() {
        context->allocator = original_allocator;
    }
};
// Replaces this thread's context pointer (e.g. when a new thread adopts a
// freshly created Thread_Context).
force_inline void set_thread_context (Thread_Context* new_context) {
    thread_local_context = new_context;
}
// #Note: Both functions will free next arenas, we only worry about keeping memory in the first arena (typically 64MB).
// Resets temporary storage but keeps the first arena's committed memory.
void temp_reset_keeping_memory() {
    Thread_Context* context = thread_context();
    arena_reset(context->temp, false);
}
// Resets temporary storage and releases memory (see arena_reset's bool arg).
void temp_reset() { // alias: reset_temporary_storage.
    Thread_Context* context = thread_context();
    arena_reset(context->temp, true);
}

View File

@ -0,0 +1,187 @@
#define ENABLE_STACK_TRACE 0 // #stack_trace
// #define ENABLE_STACK_TRACE BUILD_DEBUG // #stack_trace
// #hacky fwd declares
struct Source_Code_Location {
string file_name;
string function_name;
s32 line_number;
};
struct Stack_Trace_Node {
Stack_Trace_Node* next;
// Information
Source_Code_Location data;
s32 call_depth;
};
struct Error;
struct Graphics;
// Per-thread state (see Context_Base in jai, TCTX in raddebugger).
// Accessed through the thread_context() macro; the main thread's instance
// is built by Bootstrap_Main_Thread_Context().
struct Thread_Context {
    ExpandableArena* temp; // Used for temporary allocations, scratch space.
    ExpandableArena* arena; // general purpose local arena
    Allocator allocator; // current context allocator (swapped by Push_Allocator / Push_Arena)
    s32 thread_idx; // 0 == main thread (set in Bootstrap_Main_Thread_Context)
    // u16 _padding0;
    u16 default_allocator_alignment = 16;
    string allocator_label; // swapped by Push_Allocator_Label
    Logger logger = {nullptr, nullptr};
    String_Builder* log_builder; // String builder used by log() and log_error_internal()
    String_Builder* string_builder; // Secondary builder just for convenience!
#if ENABLE_STACK_TRACE
    Stack_Trace_Node* stack_trace; // use `list(stack_trace)` in watch window of raddbg to view as array!
#endif
    Array<Thread*> child_threads; // maybe should be linked-list?
    Thread_Context* parent_thread_context = nullptr; // so we can remove from above array
    string thread_name;
    Allocator error_allocator = default_allocator();
    Error* first_error = nullptr; // head of the doubly-linked error list (see push_error)
    Error* current_error = nullptr; // tail of the error list
    Arena* error_arena; // all Error records/strings are allocated from here
    // Graphics stuff:
    Graphics* graphics;
    void* userdata; // for appending other arenas, etc.
};
// C_LINKAGE thread_static TCTX* tctx_thread_local;
thread_static Thread_Context* thread_local_context;
// #TODO #NewContext void create_thread_context (Thread_Context** context, string thread_name, bool is_main_thread);
// Thread-context #Errors:
internal void Bootstrap_Main_Thread_Context ();
// Scope guard that swaps the thread context's allocator and restores the
// old one on exit. If no thread context exists yet, construction and
// destruction are both no-ops (the new allocator is never installed).
struct Push_Allocator {
    Thread_Context* context;
    Allocator old_allocator;
    Push_Allocator (Allocator new_allocator) {
        context = thread_context();
        if (this->context != nullptr) {
            old_allocator = context->allocator;
            context->allocator = new_allocator;
        } else {
            // No context: remember something sane, but nothing is installed.
            old_allocator = default_allocator();
        }
    }
    ~Push_Allocator () {
        if (this->context != nullptr) {
            context->allocator = old_allocator;
        }
    }
};
// #stack_trace
// Pushes a call-site record onto the context's stack-trace list (LIFO,
// newest node at the head). Compiles to a no-op unless ENABLE_STACK_TRACE.
void push_stack_trace_internal (Thread_Context* context, string file_name, string function_name, s32 line_number) {
#if ENABLE_STACK_TRACE
    if (context == nullptr) return;
    Assert(context != nullptr);
    // #no_context allocation
    // Allocated from the default allocator, not the context allocator, so
    // tracing works even while the context allocator is being swapped.
    Stack_Trace_Node* new_node = (Stack_Trace_Node*)default_allocator_new(sizeof(Stack_Trace_Node));
    new_node->data.file_name = file_name;
    new_node->data.function_name = function_name;
    new_node->data.line_number = line_number;
    new_node->next = nullptr;
    if (context->stack_trace == nullptr) {
        new_node->call_depth = 1;
    } else {
        new_node->call_depth = context->stack_trace->call_depth + 1;
        new_node->next = context->stack_trace;
    }
    context->stack_trace = new_node;
#endif
}
// Pops the newest stack-trace node and frees it. Safe to call with a null
// context or an empty trace (an unbalanced pop previously dereferenced a
// null head node).
void pop_stack_trace_internal (Thread_Context* context) {
#if ENABLE_STACK_TRACE
    if (context == nullptr) return;
    Stack_Trace_Node* old_node = context->stack_trace;
    if (old_node == nullptr) return; // unbalanced pop: nothing to remove
    context->stack_trace = old_node->next;
    default_allocator_free(old_node);
#endif
}
#if ENABLE_STACK_TRACE
#define stack_trace() \
Push_Stack_Trace Concat(_push_stack_trace_guard_, __LINE__)(__FILE__, __FUNCTION__, __LINE__)
#else
#define stack_trace()
#endif
// RAII helper behind the stack_trace() macro: pushes a call-site record on
// construction and pops it on scope exit.
struct Push_Stack_Trace {
    Thread_Context* context;
    Push_Stack_Trace (string file_name, string function_name, s32 line_number) {
        context = thread_context();
        push_stack_trace_internal(context, file_name, function_name, line_number);
    }
    ~Push_Stack_Trace () {
        pop_stack_trace_internal(context);
    }
};
// #TODO: precede with something like: os_write_string_unsynchronized("Fatal Error!\n\nStack trace:", true);
string generate_stack_trace (Thread_Context* context) {
#if ENABLE_STACK_TRACE
// #no_context - we want this to work even if context is utterly broken.
String_Builder* sb = new_string_builder(Arena_Reserve::Size_64K, "generate_stack_trace");
Stack_Trace_Node* node = context->stack_trace;
print_to_builder(sb, "Thread index: %d, thread name: %s\n\n", context->thread_idx, context->thread_name.data);
while (node) {
append(sb, format_string("%s:%d: %s\n", node->data.file_name.data, node->data.line_number, node->data.function_name.data));
node = node->next;
}
append(sb, "\n");
push_allocator(default_allocator());
string stack_trace_copy = builder_to_string(sb);
free_string_builder(sb);
return stack_trace_copy;
#else
return {};
#endif
}
// We don't want to use context logger here!
// Writes the current thread's stack trace directly to standard error,
// bypassing the (possibly broken) context logger.
void print_stack_trace () {
    Thread_Context* context = thread_context();
    constexpr bool TO_STANDARD_ERROR = true;
    os_write_string_unsynchronized(generate_stack_trace(context), TO_STANDARD_ERROR);
    // Stack_Trace_Node* node = context->stack_trace;
    // while (node) {
    //     os_write_string_unsynchronized(node->data.file_name, TO_STANDARD_ERROR);
    //     string line_number_str = format_string(":%d: ", node->data.line_number); // maybe I shouldn't do this?
    //     os_write_string_unsynchronized(line_number_str, TO_STANDARD_ERROR);
    //     // os_write_string_unsynchronized("'", TO_STANDARD_ERROR);
    //     os_write_string_unsynchronized(node->data.function_name, TO_STANDARD_ERROR);
    //     os_write_string_unsynchronized("\n", TO_STANDARD_ERROR);
    //     node = node->next;
    // }
}

175
lib/Base/Bit_Array.h Normal file
View File

@ -0,0 +1,175 @@
struct Bit_Array { // we need to keep track of our allocator separately.
    u64* data; // backing storage, one bit per logical element
    s32 count; // number of addressable bits
    s32 data_count; // number of u64 words allocated // do we even need this??
    // Reads bit `index` (word = index/64, bit = index%64).
    bool operator[](s64 index) {
#if ARRAY_ENABLE_BOUNDS_CHECKING
        if (index < 0 || index >= count) { debug_break(); } // index out of bounds
#endif
        return (bool)(data[index >> 6] & (1ull << (index & 63)));
    }
}; // 16 B.
// bool contains (Bit_Array* src, Bit_Array* query) {
// Assert(src && query);
// Assert(src->data_count == query->data_count);
// for (s32 i = 0; i < src->data_count; i += 1) {
// if (src->data[i] & query->data[i] != query->data[i]) {
// return false;
// }
// }
// return true;
// }
// For every 64-bit word where `query` has any bits set, `src` must share at
// least one of those bits; returns false on the first word that doesn't
// overlap. Both arrays must have the same word count.
bool contains_single_ascii (Bit_Array* src, Bit_Array* query) {
    Assert(src && query);
    Assert(src->data_count == query->data_count);
    for (s32 i = 0; i < src->data_count; i += 1) {
        // if query u64 is non-zero, we should get a non-zero result:
        if (query->data[i] && (src->data[i] & query->data[i]) == 0) {
            return false;
        }
    }
    return true;
}
// Allocates backing storage for `count` bits (rounded up to whole u64
// words) using the context allocator (NewArray).
void bit_array_initialize (Bit_Array* ba, s64 count) {
    s64 u64_count = (count + 63) >> 6;
    Assert(count >= 0 && count <= 0x7fffffff); // fields below are s32; reject silent truncation
    ba->data = NewArray<u64>(u64_count);
    ba->data_count = (s32)u64_count;
    ba->count = (s32)count;
}
// Returns a bit array of the given size:
// (all state comes from bit_array_initialize; bits are zeroed only if
// NewArray zero-initializes — TODO confirm.)
Bit_Array new_bit_array (s64 count) {
    Bit_Array ba;
    bit_array_initialize(&ba, count);
    return ba;
}
// Frees the backing storage and clears the struct. Assumes the current
// context allocator matches the one used by bit_array_initialize.
void bit_array_delete (Bit_Array* ba) {
    internal_free(ba->data);
    ba->data = nullptr; // don't leave a dangling pointer behind (matches array_free)
    ba->count = 0;
    ba->data_count = 0;
}
// Sets bit i to 1.
void set_bit (Bit_Array* ba, s64 i) {
    Assert(i < ba->count);
    ba->data[i >> 6] |= (1ull << (i & 63));
}
// Clears bit i to 0.
void clear_bit (Bit_Array* ba, s64 i) {
    Assert(i < ba->count);
    ba->data[i >> 6] &= ~(1ull << (i & 63));
}
// Flips bit i. The previous `^= ~(1ull << ...)` XORed with the *inverted*
// mask, which toggled every other bit in the word and left bit i unchanged.
void toggle_bit (Bit_Array* ba, s64 i) {
    Assert(i < ba->count);
    ba->data[i >> 6] ^= (1ull << (i & 63));
}
// See "Conditionally set or clear bits without branching" at http://graphics.stanford.edu/~seander/bithacks.html
// Returns w with the bits selected by mask m set to b (branchless:
// -(s64)b is all-ones when b is true, zero when false).
force_inline s64 set_bits (s64 w, s64 m, bool b) {
    return (w & ~m) | ((-(s64)b) & m);
}
// Sets bit i to `value` (branchless), then verifies the write in debug.
void set_bit_to (Bit_Array* ba, s64 i, bool value) {
    Assert(i < ba->count);
    ba->data[i >> 6] = set_bits(ba->data[i >> 6], 1ull << (i & 63), value);
    Assert((*ba)[i] == value);
}
// Zeroes every word of the backing storage.
void clear_all_bits (Bit_Array* ba) {
    memset(ba->data, 0, ba->data_count * sizeof(u64));
}
// Sets every word to all-ones (including unused tail bits in the last word).
void set_all_bits (Bit_Array* ba) {
    memset(ba->data, 0xFF, ba->data_count * sizeof(u64));
}
// Inverts every word of the backing storage (flips all stored bits,
// including any unused tail bits in the last word).
void toggle_all_bits (Bit_Array* ba) {
    for (s32 word = 0; word < ba->data_count; word += 1) {
        ba->data[word] = ~ba->data[word];
    }
}
// Using Bit_Array.jai as a reference:
/*
struct Bit_Array { // 40 B
ArrayView<u64> slots;
s64 count; // bit count;
Allocator allocator;
bool operator[](s64 index) {
#if ARRAY_ENABLE_BOUNDS_CHECKING
if (index < 0 || index >= count) { debug_break(); } // index out of bounds
#endif
return (bool)(slots[index >> 6] & (1ull << (index & 63)));
}
};
void bit_array_initialize (Bit_Array* ba, s64 count) {
if (ba->allocator.proc == nullptr) ba->allocator = context_allocator();
s64 u64_count = (count + 63) >> 6;
ba->slots = ArrayView<u64>(ba->allocator, u64_count);
ba->count = count;
}
// Returns a bit array of the given size:
Bit_Array new_bit_array (s64 count) {
Bit_Array ba;
bit_array_initialize(&ba, count);
return ba;
}
void bit_array_delete (Bit_Array* ba) {
push_allocator(ba->allocator);
array_free(ba->slots);
ba->slots = {};
ba->count = 0;
}
// #TODO: set_bit, clear_bit, toggle_bit, etc.
void set_bit (Bit_Array* ba, s64 i) {
Assert(i < ba->count);
ba->slots[i >> 6] |= (1ull << (i & 63));
}
void clear_bit (Bit_Array* ba, s64 i) {
Assert(i < ba->count);
ba->slots[i >> 6] &= ~(1ull << (i & 63));
}
void toggle_bit (Bit_Array* ba, s64 i) {
Assert(i < ba->count);
ba->slots[i >> 6] ^= ~(1ull << (i & 63));
}
void set_bit_to (Bit_Array* ba, s64 i, bool value) {
Assert(i < ba->count);
ba->slots[i >> 6] = set_bits(ba->slots[i >> 6], 1ull << (i & 63), value);
Assert((*ba)[i] == value);
}
void clear_all_bits (Bit_Array* ba) {
array_zero(ba->slots);
}
void set_all_bits (Bit_Array* ba) {
memset(ba->slots.data, 0xFF, ba->slots.count * sizeof(u64));
}
void toggle_all_bits (Bit_Array* ba) {
for_each(s, ba->slots) {
ba->slots[s] ^= U64_MAX;
}
}
*/

35
lib/Base/CPU_X64.cpp Normal file
View File

@ -0,0 +1,35 @@
#if OS_WINDOWS
#include <intrin.h>
// Highest standard CPUID leaf supported by this CPU (EAX from leaf 0).
int cpu_max_feature_leaf () {
    int cpuInfo[4];
    __cpuid(cpuInfo, 0);
    int maxLeaf = cpuInfo[0];
    return maxLeaf;
}
// Processor base frequency via CPUID leaf 0x16 (EAX; MHz per the Intel SDM).
// Returns 0 when the leaf is unsupported (cpuInfo stays zeroed).
int CPU_Base_Frequency () {
    int cpuInfo[4] = {0};
    // Call CPUID with EAX = 0x16 (Base CPU Frequency)
    if (cpu_max_feature_leaf() >= 0x16)
        __cpuid(cpuInfo, 0x16);
    return cpuInfo[0];
}
#endif
#if OS_IS_UNIX
#include <cpuid.h>
// Unix variant: __get_cpuid returns 0 when leaf 0x16 is out of range, so no
// separate max-leaf check is needed (unlike the Windows path above).
int CPU_Base_Frequency () {
    unsigned int eax, ebx, ecx, edx;
    if (__get_cpuid(0x16, &eax, &ebx, &ecx, &edx)) {
        return eax; // EAX of leaf 0x16: base frequency (MHz per the Intel SDM)
    }
    return 0; // not found or supported
}
#endif

252
lib/Base/ErrorType.cpp Normal file
View File

@ -0,0 +1,252 @@
#define NO_ERROR nullptr
// Severity classes for Error records. NONE exists only so an uninitialized
// severity is detectable; it should not be used deliberately.
enum class ErrorClass: s32 {
    NONE = 0, // should not be used, just to avoid a default value being assigned.
    WARNING = 1,
    ERROR = 2,
    FATAL = 3,
    TODO = 4,
};
// #downcasts to string
// Error record. The first two fields (count, data) mirror the `string`
// layout, so the message can be viewed without a copy (see to_string).
// Errors form a doubly-linked list owned by a Thread_Context (push_error).
struct Error {
    s64 count; // message length (string layout)
    u8* data; // message bytes (string layout)
    ErrorClass severity = ErrorClass::NONE;
    s32 thread_id;
    s32 source_line;
    string file_path;
    string function_name;
    string thread_name;
    f64 timestamp; // unix time when the error was logged
    // Linked list to errors
    Error* previous_error; // if we're passing errors up the callstack.
    Error* next_error;
};
// Human-readable tag for an ErrorClass. Returns a pointer to a static
// string literal (never free it). Return type is `const char*`: string
// literals are const, and converting them to `char*` is ill-formed C++11+.
const char* error_severity (ErrorClass severity) {
    switch (severity) {
        case ErrorClass::NONE:    return "[NONE]";
        case ErrorClass::WARNING: return "[WARNING]";
        case ErrorClass::ERROR:   return "[ERROR]";
        case ErrorClass::FATAL:   return "[FATAL ERROR]";
        case ErrorClass::TODO:    return "[TODO]";
    }
    return ""; // out-of-range enum value
}
void push_error (Thread_Context* tctx, Error* new_error);
// Views the error's message as a string (no copy; aliases error->data —
// valid because Error's first two fields mirror the string layout).
string to_string (Error* error) {
    return { error->count, error->data };
}
#define log_todo(fmt, ...) \
log_error_internal(__FILE__, __FUNCTION__, __LINE__, ErrorClass::TODO, fmt, ##__VA_ARGS__)
#define log_fatal_error(fmt, ...) \
log_error_internal(__FILE__, __FUNCTION__, __LINE__, ErrorClass::FATAL, fmt, ##__VA_ARGS__)
#define log_error(fmt, ...) \
log_error_internal(__FILE__, __FUNCTION__, __LINE__, ErrorClass::ERROR, fmt, ##__VA_ARGS__)
#define log_warning(fmt, ...) \
log_error_internal(__FILE__, __FUNCTION__, __LINE__, ErrorClass::WARNING, fmt, ##__VA_ARGS__)
#define log_none(fmt, ...) \
log_error_internal(__FILE__, __FUNCTION__, __LINE__, ErrorClass::NONE, fmt, ##__VA_ARGS__)
// Allocates an Error (context allocator) that aliases `error_string`'s
// bytes — no copy is made, so the string must outlive the error.
// Callers fill in the remaining metadata (see log_error_internal).
Error* new_error (ErrorClass severity, string error_string) {
    Error* error = New<Error>();
    error->count = error_string.count;
    error->data = error_string.data;
    error->severity = severity;
    return error;
}
// Backend for the log_error/log_warning/... macros: formats the message
// (printf-style) into the context log_builder, allocates an Error from the
// context's error arena (push_arena guard covers the whole function), fills
// in source/thread metadata, and pushes it onto this thread's error list.
// NOTE(review): va_start on a `string` parameter relies on string being a
// trivial {count, data} struct — confirm.
void log_error_internal (string file_path, string function_name, s32 line_number, ErrorClass severity, string fmt, ...) {
    auto tctx = thread_context();
    Assert(tctx != nullptr);
    push_arena(tctx->error_arena);
    String_Builder* sb = thread_context()->log_builder;
    print_to_builder(sb, "%s ", error_severity(severity));
    va_list args;
    va_start(args, fmt);
    print_to_builder_internal(sb, fmt, args);
    va_end(args);
    append(sb, "\n");
    // Copy the builder contents into arena memory, then recycle the builder.
    string error_string = copy_string(string_view(sb));
    reset_string_builder(sb);
    Error* error = new_error(severity, error_string);
    // Additional information
    error->thread_id = tctx->thread_idx;
    error->source_line = line_number;
    error->file_path = copy_string(file_path);
    error->function_name = copy_string(function_name);
    // Note: we don't need to assign previous_error or next_error, as that is done by the thread_context when we #push_error
    error->previous_error = nullptr;
    error->next_error = nullptr;
    error->thread_name = copy_string(tctx->thread_name);
    error->timestamp = GetUnixTimestamp();
    push_error(tctx, error);
}
// Deep-copies an error into `tctx`'s error arena (message and all metadata
// strings), leaving the list links unset. Used to hand errors from a child
// thread to its parent (push_errors_to_parent_thread).
Error* copy_error (Thread_Context* tctx, Error* old_error) {
    push_arena(tctx->error_arena);
    string error_string_copy = copy_string(to_string(old_error));
    Error* error = new_error(old_error->severity, error_string_copy);
    error->thread_id = old_error->thread_id;
    error->source_line = old_error->source_line;
    error->file_path = copy_string(old_error->file_path);
    error->function_name = copy_string(old_error->function_name);
    // Note: we don't need to assign previous_error or next_error, as that is done by the thread_context when we #push_error
    error->previous_error = nullptr;
    error->next_error = nullptr;
    error->thread_name = copy_string(old_error->thread_name);
    error->timestamp = old_error->timestamp;
    return error;
}
// Appends `new_error` to the context's doubly-linked error list (updating
// first_error/current_error) and immediately prints it: info-level severities
// go through print(), ERROR/FATAL through print_error().
void push_error (Thread_Context* tctx, Error* new_error) {
    Assert(tctx == thread_context()); // Not a real assert, just wondering if we'll ever call this with a non-local context?
    Assert(new_error != nullptr);
    if (new_error == nullptr) return;
    Error* current_error = tctx->current_error;
    if (current_error) {
        // Link at the tail of the existing list.
        current_error->next_error = new_error;
        new_error->previous_error = current_error;
    } else {
        // Empty list: this error becomes the head.
        tctx->first_error = new_error;
        new_error->previous_error = nullptr;
    }
    tctx->current_error = new_error;
    switch (new_error->severity) {
        case ErrorClass::TODO:
        case ErrorClass::NONE:
        case ErrorClass::WARNING: {
            print(to_string(new_error));
        } break;
        case ErrorClass::ERROR:
        case ErrorClass::FATAL: {
            print_error(to_string(new_error));
        } break;
    }
}
// Like push_error, but without printing and without asserting that `tctx`
// is the calling thread's context (the list linking itself is identical).
// Used when merging a child thread's errors into its parent's context.
void push_error_no_context (Thread_Context* tctx, Error* new_error) {
    Assert(new_error != nullptr);
    if (new_error == nullptr) return;
    Error* current_error = tctx->current_error;
    if (current_error) {
        new_error->previous_error = current_error;
        current_error->next_error = new_error;
    } else {
        tctx->first_error = new_error;
    }
    tctx->current_error = new_error;
}
// Drops ALL errors at once: resets the arena that owns every Error record
// and string, then empties the list pointers.
void clear_errors (Thread_Context* tctx) { // Reset pointers and reset error_arena
    arena_reset(tctx->error_arena); // maybe overwrite memory?
    tctx->first_error = nullptr;
    tctx->current_error = nullptr;
}
// Unlinks one specific error from the context's list. The record's memory is
// NOT reclaimed (it lives in error_arena) unless it was the only error, in
// which case the whole arena is reset via clear_errors(). Debug-asserts if
// `error` is not actually in the list.
void clear_error (Thread_Context* tctx, Error* error) {
    // If we want to clear a specific error (simply remove from the list)
    Assert(error != nullptr);
    bool is_current_error = (tctx->current_error == error);
    bool is_first_error = (tctx->first_error == error);
    // Walk the list to confirm `error` is a member before unlinking.
    Error* current_error = tctx->first_error;
    if (current_error == nullptr) return; // no errors in linked list.
    while (current_error != error && current_error != nullptr) {
        current_error = current_error->next_error;
    }
    Assert(current_error != nullptr);
    if (current_error == nullptr) return; // shouldn't happen
    Error* the_previous_error = current_error->previous_error;
    Error* the_next_error = current_error->next_error;
    // Remove current_node from linked list:
    if (the_previous_error) {
        the_previous_error->next_error = the_next_error;
    }
    if (the_next_error) {
        the_next_error->previous_error = the_previous_error;
    }
    if (is_first_error && is_current_error) { // It matches the only item in the list, just empty the list:
        clear_errors(tctx);
        return;
    }
    if (is_first_error) {
        // the_next_error becomes new first error.
        tctx->first_error = the_next_error;
    }
    if (is_current_error) {
        // the current_error becomes the previous error
        tctx->current_error = the_previous_error;
    }
}
// Copy every pending error into the parent thread's context, then drain ours.
// Copies are made first because clear_error on the last element resets our
// error arena (freeing the originals).
void push_errors_to_parent_thread (Thread_Context* tctx) {
    if (tctx->first_error == nullptr) return;
    Thread_Context* parent = tctx->parent_thread_context;
    Assert(parent);
    for (Error* head = tctx->first_error; head != nullptr; head = tctx->first_error) {
        push_error_no_context(parent, copy_error(parent, head));
        clear_error(tctx, head);
    }
}
// Collect pointers to every error in this context into a flat array.
// The array uses the context allocator; calling with temp() pushed is recommended.
ArrayView<Error*> get_all_errors (Thread_Context* tctx) {
    Array<Error*> collected = {};
    for (Error* e = tctx->first_error; e != nullptr; e = e->next_error) {
        array_add(collected, e);
    }
    // #TODO(Low priority): also recurse through child threads?
    // NOTE: I don't think we actually want this, because we merge
    // our errors on the main thread when we thread_deinit.
    // for_each(t, tctx->child_threads) { }
    return collected;
}
// Will need to use __FILE__ and __LINE__ macros
// Error* new_error (string error_message, ErrorClass severity, Error* previous_error=nullptr);
// Error* append_error (Error* old_error, Error* new_error);
// void context_report_error (Error* error);
// void cleanup_error (Error* error);

View File

@ -0,0 +1,197 @@
// Create an ExpandableArena whose header lives inside its own first reservation
// ("bootstrapped"). The base-arena call only initializes sizeof(Arena) bytes of
// the header, so every ExpandableArena-specific field must be set up here.
ExpandableArena* bootstrap_expandable_arena_internal (Arena_Reserve new_reserve, s32 commit_page_count,
    string label, string file_path, string function_name, s32 line_number) {
    ExpandableArena* new_arena = (ExpandableArena*)bootstrap_arena_internal(
        new_reserve,
        commit_page_count,
        label,
        file_path,
        function_name,
        line_number);
    // Note: beyond first 32 bytes (sizeof(Arena)) ExpandableArena will not be initialized,
    // so we do it here:
    new_arena->current = (Arena*)new_arena; // the base arena is the active one initially
    arena_set_chained_flag(new_arena->current);
    new_arena->current_point = expandable_arena_start(new_arena);
    new_arena->next_arenas = Array<Arena*>(); // next_arenas will be uninitialized, so we have to do this
    // We have to use malloc because if we reset this new arena, all the data will be lost
    // We don't want to tie the lifetime of next_arenas to this expandable arena.
    new_arena->next_arenas.allocator = default_allocator();
    // array_reserve(new_arena->next_arenas, 8);
    return new_arena;
}
// Debug-only bookkeeping: remember the peak byte usage seen on this arena.
force_inline void update_high_water_mark (ExpandableArena* arena_ex) {
    #if BUILD_DEBUG
    s64 usage_now = arena_usage_bytes(arena_ex);
    if (arena_ex->high_water_mark < usage_now) {
        arena_ex->high_water_mark = usage_now;
    }
    #endif
}
// Allocator-interface adapter for ExpandableArena. DEALLOCATE is a no-op:
// arena memory is reclaimed by reset/delete, not per-allocation.
void* expandable_arena_allocator_proc (Allocator_Mode mode, s64 requested_size, s64 old_size, void* old_memory, void* allocator_data) {
    ExpandableArena* arena = (ExpandableArena*)allocator_data;
    Assert(arena != nullptr);
    switch (mode) {
        case Allocator_Mode::ALLOCATE: {
            void* new_memory = expandable_arena_alloc(arena, requested_size);
            update_high_water_mark(arena);
            return new_memory;
        } break;
        case Allocator_Mode::RESIZE: {
            // See note :ArenaResizing
            void* new_memory = expandable_arena_alloc(arena, requested_size);
            // Fix: copy at most the NEW size — the old code copied old_size bytes
            // unconditionally, overrunning the new block on a shrinking resize.
            // Also guard against old_memory == nullptr (memcpy from null is UB
            // even for a zero count) for a first-time RESIZE call.
            s64 copy_size = (old_size < requested_size) ? old_size : requested_size;
            if (old_memory != nullptr && copy_size > 0) {
                memcpy(new_memory, old_memory, copy_size);
            }
            update_high_water_mark(arena);
            return new_memory;
        } break;
        case Allocator_Mode::DEALLOCATE: {
            return nullptr;
        } break;
        case Allocator_Mode::DETAILS: {
            if (allocator_data == nullptr) {
                return "expandable_arena_allocator_proc: data pointer is null!";
            }
            if (thread_context()->temp == allocator_data) {
                return "expandable_arena_allocator_proc: temp arena";
            }
            if (thread_context()->arena == allocator_data) {
                return "expandable_arena_allocator_proc: main arena";
            }
            return "expandable_arena_allocator_proc: other arena";
        } break;
    }
    return nullptr;
}
// An ExpandableArena is usable once it has backing memory and an active sub-arena.
bool is_valid (ExpandableArena* arena) {
    if (arena == nullptr)              return false;
    if (arena->memory_base == nullptr) return false;
    if (arena->current == nullptr)     return false;
    return true;
}
// Allocate `byte_count` bytes from the currently-active sub-arena. If the
// active arena's committed pages are exhausted, more pages are committed; if
// the whole reservation is too small, a brand-new expansion arena is
// bootstrapped, appended to next_arenas, and becomes the active arena.
void* expandable_arena_alloc (ExpandableArena* arena_ex, s64 byte_count) {
    Assert(arena_ex != nullptr);
    Assert(arena_ex->memory_base != nullptr); // must be initialized before calling.
    Assert(is_valid(arena_ex));
    // Assert(arena_free_list->initialized);
    Arena* arena = (Arena*)arena_ex->current;
    u8* result = Align<u8*>(arena->current_point, arena->alignment);
    u8* result_end = result + byte_count;
    if (result_end > arena->first_uncommitted_page) {
        if (result_end > arena_address_limit(arena)) {
            // Pick an appropriate reserve size that will fit this allocation.
            Arena_Reserve new_min_reserve = next_reserve_size(byte_count + sizeof(Arena));
            if (arena->reserve_size > new_min_reserve) {
                new_min_reserve = arena->reserve_size;
            }
        #if BUILD_DEBUG
            Arena* new_arena = bootstrap_arena(new_min_reserve, arena_ex->label);
        #else
            Arena* new_arena = bootstrap_arena(new_min_reserve, "");
        #endif
            // New expansion arena inherits the expandable arena's configuration.
            new_arena->alignment = arena_ex->alignment;
            new_arena->flags = arena_ex->flags;
            arena_ex->current = new_arena;
            push_allocator_label("arena_ex->next_arenas");
            array_add(arena_ex->next_arenas, new_arena);
            // Allocate from new arena, committing pages as required
            result = Align<u8*>(new_arena->current_point, new_arena->alignment);
            result_end = result + byte_count;
            if (result_end > new_arena->first_uncommitted_page) {
                extend_committed_pages(new_arena, result_end);
            }
        } else {
            extend_committed_pages(arena, result_end);
        }
    }
    arena_ex->current->current_point = result_end;
    return result;
}
// First usable byte after the ExpandableArena header, at the default alignment.
u8* expandable_arena_start (ExpandableArena* arena_ex) {
    u8* after_header = arena_ex->memory_base + sizeof(ExpandableArena);
    return Align(after_header, ARENA_DEFAULT_ALIGNMENT);
}
// Wrap an ExpandableArena in the generic Allocator interface.
Allocator allocator (ExpandableArena* arena_ex) {
    Allocator result = { expandable_arena_allocator_proc, arena_ex };
    return result;
}
// last arena is the arena we want to pop to.
// Walk next_arenas from the newest backwards, deleting every expansion arena
// above `last_arena`, then restore `last_arena`'s current_point. Supports
// popping all the way back to the base arena (last_arena == arena_ex itself).
void arena_reset_to (ExpandableArena* arena_ex, Arena* last_arena, u8* starting_point) {
    // going backwards from end of arena list
    if (!arena_ex->next_arenas.count) {
        // No expansion arenas exist; just rewind the base arena.
        arena_ex->current_point = starting_point;
        return;
    }
    for_each_reverse(i, arena_ex->next_arenas) {
        Arena* arena = arena_ex->next_arenas[i];
        if (arena == last_arena) { // return to starting_point
            arena_ex->current = arena;
            arena_ex->current->current_point = starting_point;
            break;
        }
        arena_delete(arena);
        // We can just decrement the count because arenas are added and removed in order. see: pop()
        arena_ex->next_arenas.count -= 1;
    }
    // If we popped past every expansion arena, the base arena becomes current.
    if (last_arena == (Arena*)arena_ex) {
        arena_ex->current = (Arena*)arena_ex;
        arena_ex->current->current_point = starting_point;
    }
}
// Rewind the whole expandable arena to empty: delete every expansion arena and
// make the base arena current again. `free_extra_pages` additionally decommits
// base-arena pages beyond the initial commit.
void arena_reset (ExpandableArena* arena_ex, bool free_extra_pages) {
    if (!is_valid(arena_ex)) return;
    // Free expansion arenas in `next_arenas`
    for (s64 i = 0; i < arena_ex->next_arenas.count; i += 1) {
        arena_delete(arena_ex->next_arenas[i]);
    }
    // Reset next_arenas
    #if BUILD_DEBUG
    array_zero(arena_ex->next_arenas);
    #endif
    array_reset_keeping_memory(arena_ex->next_arenas);
    arena_ex->current = (Arena*)arena_ex;
    arena_ex->current_point = expandable_arena_start(arena_ex);
    if (free_extra_pages) {
        free_pages_down_to((Arena*)arena_ex, arena_ex->initial_commit_page_count);
    }
}
// #TODO: make an option to "FULL-DELETE" the expansion arenas as well.
// Full teardown, in strict order: reset (deletes expansion arenas), free the
// heap-backed next_arenas array, then release the base arena's own reservation
// (which also frees this header, so arena_ex is dead afterwards).
force_inline void arena_delete (ExpandableArena* arena_ex) {
    arena_reset(arena_ex, true);
    array_free(arena_ex->next_arenas);
    arena_delete((Arena*)arena_ex);
}
// Total bytes in use across the base arena plus every expansion arena.
s64 arena_usage_bytes (ExpandableArena* arena_ex) {
    s64 base_usage = arena_ex->current_point - expandable_arena_start(arena_ex);
    s64 expansion_usage = 0;
    for_each(i, arena_ex->next_arenas) {
        expansion_usage += arena_usage_bytes(arena_ex->next_arenas[i]);
    }
    return base_usage + expansion_usage;
}

View File

@ -0,0 +1,32 @@
// Just an idea I had so that we can start with a small arena and increase on an as-needed basis,
// this way allocations are always extremely fast.
// The full structure is 80B, and because we use bootstrapping, we only need a little extra memory for storing
// the `next_arenas` pointers
// DO NOT MERGE WITH `Arena`, we need fixed size arenas so that we can back
// `ArenaArray`s.
struct ExpandableArena : Arena {
    Arena* current;            // arena currently being allocated from (base or an expansion)
    Array<Arena*> next_arenas; // expansion arenas, oldest first; backed by the default allocator
};
#if BUILD_DEBUG
// Debug builds record the bootstrap call site (file/function/line) in the arena.
#define bootstrap_expandable_arena(_reserve_, _label_) \
    bootstrap_expandable_arena_internal((_reserve_), (s32)ARENA_DEFAULT_COMMIT_PAGE_COUNT, (_label_), __FILE__, __FUNCTION__, __LINE__)
#else
#define bootstrap_expandable_arena(_reserve_, _label_) \
    bootstrap_expandable_arena_internal((_reserve_), (s32)ARENA_DEFAULT_COMMIT_PAGE_COUNT, (_label_))
#endif
// Trailing parameters default so release builds can omit call-site info.
ExpandableArena* bootstrap_expandable_arena_internal (Arena_Reserve new_reserve=Arena_Reserve::Size_64K, s32 commit_page_count=ARENA_DEFAULT_COMMIT_PAGE_COUNT,
    string label="", string file_path="", string function_name="", s32 line_number=0);
void* expandable_arena_allocator_proc (Allocator_Mode mode, s64 requested_size, s64 old_size, void* old_memory, void* allocator_data);
bool is_valid (ExpandableArena* arena);
void* expandable_arena_alloc (ExpandableArena* arena_ex, s64 byte_count);
s64 arena_usage_bytes (ExpandableArena* arena_ex);
u8* expandable_arena_start (ExpandableArena* arena_ex);
Allocator allocator (ExpandableArena* arena_ex);
void arena_reset_to (ExpandableArena* arena_ex, Arena* last_arena, u8* starting_point);
void arena_reset (ExpandableArena* arena_ex, bool free_extra_pages);
force_inline void arena_delete (ExpandableArena* arena_ex);

217
lib/Base/File_Search.h Normal file
View File

@ -0,0 +1,217 @@
// User-tunable filter switches applied to a file-system search query.
struct File_Search_Filter_Params {
    bool search_directories = true;
    bool search_files = true;
    bool exact_match = false;
    bool order_most_recent = false; // this should be an enum, but w/e :SortOrderComboBox
    bool match_word = false;
    bool case_sensitive = false;
    // #TODO: File data
    // Preprocess params
    // ArrayView<bool> include_drive; // defaults all to true.
};
// One in-flight file-system search: the query text, its filter settings, the
// enumerated filesystem snapshot it runs against, and its worker pool.
struct File_Search {
    string query;
    File_Search_Filter_Params params;
    // Filesystem Data:
    #if OS_WINDOWS
    Win32_File_Enumeration* src; // snapshot being searched; must be set before running a query
    // #TODO: reserve space for result indices!
    #endif
    Thread_Group thread_group;
};
// Worker procedure for the File_Search thread group. Currently a stub:
// work items are accepted but nothing is processed yet.
Thread_Continue_Status file_search_thread_group_proc (Thread_Group* group, Thread* thread, void* work) {
    return Thread_Continue_Status::CONTINUE;
}
// WIP: split `query` into filter tokens before dispatching a search.
// Currently only allocates the split array; filter construction is unwritten.
void prepare_file_search (File_Search* fs, string query) {
    // Note we will have to copy all the params out of File_Search into File_Search_Task/MT
    // Tokenizer t = get_tokenizer_from_string(query);
    // Tokenizer is the wrong move, we really just want to split up the query and analyze each bit separately.
    // 1. We want to split by spaces or other significant symbols like `|`
    Array<string> query_split;
    query_split.allocator = default_allocator();
    // 2. construct filters from splits
    // a. contains drive letters (can be independent, or include additional dirs, e.g. `C:\jai`
    // b. "NOT" operator `!`
}
// Kick off a search over the enumerated filesystem snapshot. Lazily creates the
// worker thread group on first use; the actual work splitting is still #TODO.
void file_search_run_search_query (File_Search* fs, string query) {
    Assert(fs->src != nullptr); // check our data is actually there.
    if (fs->src == nullptr) return;
    prepare_file_search(fs, query);
    // 1. preprocess and configure filter parameters for search query:
    // a. search for drive letters `C:` etc. and `C:\`
    // we need to tokenize the search query!
    // Tokenizer query_tokenizer = {query, 0};
    // Check if thread_group initted:
    if (!fs->thread_group.initialized) {
        // option1: os_cpu_physical_core_count, option2: os_cpu_logical_core_count
        push_allocator(default_allocator());
        bool enable_work_stealing = false;
        string thread_group_label = "File_Search";
        thread_group_init(&fs->thread_group, os_cpu_physical_core_count(), file_search_thread_group_proc,
            thread_group_label, enable_work_stealing);
        // #TODO: split up work.
        // Assert(params->include_drive.count == fs->src->drive_data.count);
        // for_each(d, fs->src->drive_data) {
        //     if (!params->include_drive[d]) continue; // only include drives we're actually searching.
        //     // #current
        // }
        thread_group_start(&fs->thread_group);
    }
}
// Shared state for the bit-table search experiment (one query in flight at a time).
global Thread_Group bit_table_thread_group;
global s64 bit_table_thread_group_work_added = 0;     // packets queued for the current query
global s64 bit_table_thread_group_work_completed = 0; // packets finished by workers
global f64 bit_table_task_start_time = 0;             // unix timestamp when the query was queued
global f64 bit_table_task_end_time_main_thread = 0;
global s64 bit_table_task_result_count = 0;
// Zero all bit-table query bookkeeping before starting a new query.
void bit_table_task_reset_global_state () {
    bit_table_task_result_count = 0;
    bit_table_task_end_time_main_thread = 0;
    bit_table_task_start_time = 0;
    bit_table_thread_group_work_completed = 0;
    bit_table_thread_group_work_added = 0;
}
// One unit of bit-table search work: scan `sample_count` entries starting at
// `index_offset` in one drive's name table, collecting matching entry indices.
struct Bit_Table_Test_Work {
    Bit_Array bit_query;
    ArrayView<Bit_Array> bit_arrays;
    Array<u32> results; // matching drive-relative entry indices
    s32 index_offset;   // first entry index this packet covers
    s32 sample_count;   // number of entries to scan
    b32 directories; // either it's fst_dirs or fst_files.
    string query; // for debugging / printing
    // For memchr stuff:
    Win32_File_Enumeration_Drive* drive;
};
// Release everything a Bit_Table_Test_Work owns: results array, query bit set,
// and the copied query string. (Frees are independent; any order works.)
void delete_task (Bit_Table_Test_Work* btt) {
    array_free(btt->results);
    bit_array_delete(&btt->bit_query);
    string_free(btt->query);
}
// Worker: scan this packet's slice of directory/file names for the query
// character (case-insensitive, single ASCII letter only) using AVX2 memchr,
// collecting matching entry indices into btt->results.
Thread_Continue_Status bit_table_testing_thread_group_proc (Thread_Group* group, Thread* thread, void* work) {
    Bit_Table_Test_Work* btt = thread_group_task(Bit_Table_Test_Work);
    // debug_break();
    auto_release_temp();
    // We need to use filter params here to check if this is correct.
    Assert(btt->query.count == 1); // only single-character queries are implemented
    for (s64 b = 0; b < btt->sample_count; b += 1) {
        // For single ascii character: testing bit arrays
        string current_entry;
        if (btt->directories) {
            current_entry = directory_name_string_view(btt->drive, btt->index_offset + b);
        } else {
            current_entry = file_name_string_view(btt->drive, btt->index_offset + b);
        }
        // memchr (find characters in a buffer)
        if (is_ascii_letter(btt->query.data[0])) {
            u8 lower_case = to_lower_ascii(btt->query.data[0]);
            u8 upper_case = to_upper_ascii(btt->query.data[0]);
            // NOTE(review): the (u16) casts truncate counts > 65535 — presumably
            // entry names are always short; confirm memchr_avx2's length type.
            if (memchr_avx2(current_entry.data, lower_case, (u16)current_entry.count)
                || memchr_avx2(current_entry.data, upper_case, (u16)current_entry.count)) {
                array_add(btt->results, (u32)(btt->index_offset + b));
            }
        } else Assert(false); // not yet implemented for symbols!
        // For any X64 use memchr
        // if (memchr(current_entry.data, lower_case, current_entry.count) || memchr(current_entry.data, upper_case, current_entry.count)) {
        // for_each(b, btt->bit_arrays) {
        //     if (contains_single_ascii(&btt->bit_arrays[b], &btt->bit_query)) {
        //         array_add(btt->results, (u32)b);
        //     }
        // Testing memchr:
    }
    // Per-packet timing is measured from the global query start time.
    f64 elapsed_time = GetUnixTimestamp()-bit_table_task_start_time;
    push_allocator(temp());
    log("Done with query %s in %s",
        btt->query.data,
        format_time_seconds(elapsed_time).data);
    return Thread_Continue_Status::CONTINUE;
}
void bit_table_testing_init () {
if (bit_table_thread_group.initialized) return;
bool enable_work_stealing = true;
thread_group_init(&bit_table_thread_group, os_cpu_logical_core_count(), bit_table_testing_thread_group_proc,
"bit_table_thread_group", enable_work_stealing);
thread_group_start(&bit_table_thread_group);
}
void bit_table_testing_add_query (string query) {
bit_table_testing_init();
bit_table_task_start_time = GetUnixTimestamp();
s32 thread_count = bit_table_thread_group.worker_info.count;
// dirs...
for_each(d, w32fe->drive_data) { // for each drive
auto drive = &w32fe->drive_data[d];
s64 dirs_per_thread = directory_count(drive) / thread_count;
s64 start_offset = 0;
auto src_array = to_view(drive->fst_dirs.bit_arrays);
for (s32 i = 0; i < thread_count; i += 1) {
auto btt = New<Bit_Table_Test_Work>();
btt->drive = drive;
btt->query = copy_string(query);
bit_array_initialize(&btt->bit_query, 128);
Bit_Array_Add_Value_Ascii_No_Case(&btt->bit_query, btt->query);
if (i == thread_count - 1) {
dirs_per_thread = (src_array.count - start_offset);
}
btt->bit_arrays = array_view(src_array, start_offset, dirs_per_thread);
btt->index_offset = start_offset;
btt->sample_count = dirs_per_thread;
start_offset += dirs_per_thread;
btt->results = Array<u32>(context_allocator(), btt->bit_arrays.count);
btt->directories = true;
add_work(&bit_table_thread_group, btt);
bit_table_thread_group_work_added += 1;
}
Assert(start_offset == directory_count(drive));
}
// files...
for_each(d, w32fe->drive_data) { // for each drive
auto drive = &w32fe->drive_data[d];
s64 files_per_thread = file_count(drive) / thread_count;
s64 start_offset = 0;
auto src_array = to_view(drive->fst_files.bit_arrays);
for (s32 i = 0; i < thread_count; i += 1) {
auto btt = New<Bit_Table_Test_Work>();
btt->drive = drive;
btt->query = copy_string(query);
bit_array_initialize(&btt->bit_query, 128);
Bit_Array_Add_Value_Ascii_No_Case(&btt->bit_query, btt->query);
if (i == thread_count - 1) {
files_per_thread = (src_array.count - start_offset);
}
btt->bit_arrays = array_view(src_array, start_offset, files_per_thread);
btt->index_offset = start_offset;
btt->sample_count = files_per_thread;
start_offset += files_per_thread;
btt->results = Array<u32>(context_allocator(), btt->bit_arrays.count);
btt->directories = false;
add_work(&bit_table_thread_group, btt);
bit_table_thread_group_work_added += 1;
}
Assert(start_offset == file_count(drive));
}
}

View File

@ -0,0 +1,182 @@
#if GP_ALLOCATOR_TRACK_ALLOCATIONS
// Global allocation tracker; every access below takes allocator_mutex. @Shared
global General_Allocator gAllocator; // @Shared
global Mutex allocator_mutex;
global bool default_allocator_show_small_allocations = false;
#endif
#if !COMPILER_MSVC
// Note: There is *no* std::aligned_realloc. Must implement manually if needed.
// Fix: C++17 std::aligned_alloc requires `size` to be an integral multiple of
// `alignment`; several implementations return nullptr otherwise. Round the
// requested size up before every aligned_alloc call.
force_inline void* gp_aligned_realloc(u64 old_size, void* ptr, u64 new_size, u64 alignment) {
    u64 rounded_size = ((new_size + alignment - 1) / alignment) * alignment;
    if (!ptr || old_size == 0) return std::aligned_alloc(alignment, rounded_size);
    if (new_size == 0) { std::free(ptr); return nullptr; }
    // Allocate new block
    void* new_ptr = std::aligned_alloc(alignment, rounded_size);
    if (!new_ptr) return nullptr; // old block left intact, matching realloc semantics
    u64 copy_size = old_size < new_size ? old_size : new_size;
    memcpy(new_ptr, ptr, copy_size);
    std::free(ptr);
    return new_ptr;
}
#endif
// Expose the tracker (for debug UIs / leak reports); null when tracking is compiled out.
General_Allocator* get_general_allocator_data() {
    #if GP_ALLOCATOR_TRACK_ALLOCATIONS
    return &gAllocator;
    #else
    return (General_Allocator*)nullptr;
    #endif
}
// Compile-time tracking switch mirrored as a runtime query.
constexpr s64 Allocation_Tracking_Is_Enabled = GP_ALLOCATOR_TRACK_ALLOCATIONS;
bool default_allocator_tracking_enabled () {
    return static_cast<bool>(Allocation_Tracking_Is_Enabled);
}
// One-time setup of the allocation-tracking table (fixed capacity: 64 * 4096
// entries). The table's own backing memory is allocated outside the tracker
// and never freed. @MemoryLeak (intentional)
void default_allocator_initialize_tracking () {
    #if GP_ALLOCATOR_TRACK_ALLOCATIONS
    mutex_init(&allocator_mutex);
    constexpr s64 alignment = 64;
    s64 item_count_max = 64 * 4096;
    s64 total_allocation_size = item_count_max * sizeof(Allocation);
    auto memory = Aligned_Alloc(total_allocation_size, alignment); // @MemoryLeak (intentional)
    gAllocator.allocations = Array<Allocation>(default_allocator(), item_count_max, memory, item_count_max);
    gAllocator.allocations.count = 0; // Init to zero.
    #endif
}
// Linear scan of the tracking table to ask whether `old_memory` was handed out
// by the default allocator. Always false when tracking is compiled out.
// (Removed a dead `s64 old_size = 0;` local — copy-paste leftover from
// Remove_Allocation.)
bool default_allocator_is_this_yours (void* old_memory) {
    #if GP_ALLOCATOR_TRACK_ALLOCATIONS
    lock_guard(&allocator_mutex);
    for (s64 i = 0; i < gAllocator.allocations.count; i += 1) {
        if (gAllocator.allocations[i].memory == old_memory) {
            return true;
        }
    }
    #endif
    return false;
}
// Record one allocation in the global tracker (no-op when tracking is off or
// the allocation failed). Labels are copied "untracked" so the tracker's own
// bookkeeping never recurses into itself.
void Add_Allocation(s64 new_size, void* new_memory_address, s32 alignment) {
    #if GP_ALLOCATOR_TRACK_ALLOCATIONS
    if (new_memory_address == nullptr) return;
    string label = {};
    // The label copy happens before taking the mutex to keep the critical section short.
    if (!thread_context()) {
        label = copy_string_untracked("Uninitialized context");
    } else {
        if (is_valid(thread_context()->allocator_label)) {
            label = copy_string_untracked(thread_context()->allocator_label);
        } else {
            label = copy_string_untracked("Unlabeled allocation");
        }
    }
    Allocation allocation = {new_size, new_memory_address, alignment, label};
    lock_guard(&allocator_mutex);
    array_add(gAllocator.allocations, allocation);
    gAllocator.total_bytes_allocated += new_size;
    #endif
}
// Drop `old_memory`'s record from the tracker: free its label, swap-remove the
// entry, and subtract its size from the running total. Asserts (debug builds)
// if the pointer was never tracked.
void Remove_Allocation(void* old_memory) {
    #if GP_ALLOCATOR_TRACK_ALLOCATIONS
    lock_guard(&allocator_mutex);
    s64 old_size = 0;
    for (s64 i = 0; i < gAllocator.allocations.count; i += 1) {
        if (gAllocator.allocations[i].memory != old_memory)
            continue;
        old_size = gAllocator.allocations[i].size;
        if (is_valid(gAllocator.allocations[i].label)) {
            string_free_untracked(gAllocator.allocations[i].label);
        }
        array_unordered_remove_by_index(gAllocator.allocations, i);
        gAllocator.total_bytes_allocated -= old_size;
        return;
    }
    Assert(false); // "Did not find allocation in Array"
    #endif
}
// Allocate `new_size` bytes at `alignment` through the platform aligned-malloc,
// optionally fill with ALLOCATOR_INIT_VALUE, and record it in the tracker.
void* default_allocator_new (s64 new_size, s64 alignment, bool initialize) {
    // Fallback allocator: _aligned_malloc, which is MSVC's version of std::aligned_alloc
    // Fix: on the non-MSVC path Aligned_Alloc is std::aligned_alloc, which
    // requires the size to be a multiple of the alignment — round up so the
    // request is always valid (harmless slack on MSVC).
    s64 alloc_size = ((new_size + alignment - 1) / alignment) * alignment;
    auto memory = Aligned_Alloc(alloc_size, alignment);
    // _aligned_malloc does not zero memory, so we can zero it here
    if (initialize && memory) { memset(memory, ALLOCATOR_INIT_VALUE, new_size); }
    Add_Allocation(new_size, memory, (s32)alignment);
    // printf("[GP] Allocating memory %p of size %llu\n", memory, new_size);
    return memory;
}
// Grow/shrink an aligned allocation, updating the tracker. The platform
// realloc copies the old contents; only the newly-grown tail is initialized here.
void* default_allocator_realloc (s64 old_size, void* old_memory, s64 new_size, s64 alignment, bool initialize) {
    Assert((alignment % 8) == 0 && (alignment != 0));
    if (old_memory == nullptr) {
        // Fix: forward `initialize` — the flag was previously dropped on this
        // path, so fresh allocations were always initialized regardless.
        return default_allocator_new(new_size, alignment, initialize);
    }
    // Debug version: _aligned_realloc_dbg
    auto new_memory_address = Aligned_Realloc(old_size, old_memory, new_size, alignment);
    if (initialize && new_memory_address && new_size > old_size) {
        memset((u8*)new_memory_address + old_size, ALLOCATOR_INIT_VALUE, new_size - old_size);
    }
    Remove_Allocation(old_memory);
    Add_Allocation(new_size, new_memory_address, (s32)alignment);
    // printf("[GP] Rellocating memory %p of size %llu\n", new_memory_address, new_size);
    return new_memory_address;
}
// Release an aligned allocation and drop its tracking record.
void default_allocator_free (void* memory) {
    if (memory == nullptr) return;
    // Fix: untrack BEFORE freeing. With the old free-then-remove order, another
    // thread could be handed the same address by the heap and Add_Allocation it
    // in the window before Remove_Allocation ran, which would then delete the
    // wrong (live) record from the table.
    Remove_Allocation(memory);
    Aligned_Free(memory);
    // printf("[GP] Deleting memory %p\n", memory);
}
// The process-wide general-purpose allocator; carries no per-instance data.
Allocator default_allocator () {
    Allocator result = { default_allocator_proc, nullptr };
    return result;
}
// Allocator-interface adapter for the malloc-backed default allocator.
// allocator_data is unused (always nullptr); alignment comes from the current
// thread context when one exists.
void* default_allocator_proc (Allocator_Mode mode, s64 requested_size, s64 old_size, void* old_memory, void* allocator_data) {
    u16 alignment = 16; // default alignment
    Thread_Context* context = thread_context();
    if (context) alignment = context->default_allocator_alignment;
    switch (mode) {
        case Allocator_Mode::ALLOCATE: {
            return default_allocator_new(requested_size, alignment);
        } break;
        case Allocator_Mode::RESIZE: {
            void* result = default_allocator_realloc(old_size, old_memory, requested_size, alignment);
            // NOTE: The _aligned_realloc function already copies the old memory, so there's
            // no need to copy the old memory block here.
            return result;
        } break;
        case Allocator_Mode::DEALLOCATE: {
            default_allocator_free(old_memory); // unused
        } break;
        case Allocator_Mode::DETAILS: {
            Assert(allocator_data == nullptr);
            return "default_allocator";
        } break;
    }
    return nullptr;
}

View File

@ -0,0 +1,56 @@
#pragma once
// Allocation tracking is debug-only; VERY_DEBUG additionally routes through the
// MSVC CRT debug heap (_dbg variants) with call-site reporting.
#define GP_ALLOCATOR_TRACK_ALLOCATIONS BUILD_DEBUG
#define GP_ALLOCATOR_VERY_DEBUG BUILD_DEBUG && 0
#if COMPILER_MSVC
#include <malloc.h> // _aligned_malloc, _aligned_realloc, _aligned_free (MSVC Only!)
    #if GP_ALLOCATOR_VERY_DEBUG
        #include <crtdbg.h> // required for _dbg variants
        #define Aligned_Alloc(sz, align) _aligned_malloc_dbg(sz, align, __FILE__, __LINE__)
        #define Aligned_Realloc(old_sz, ptr, sz, align) _aligned_realloc_dbg(ptr, sz, align, __FILE__, __LINE__)
        #define Aligned_Free(ptr) _aligned_free_dbg(ptr)
    #else
        // Note: the MSVC variants ignore old_sz; the parameter exists for the
        // POSIX emulation below, which needs it to copy the old contents.
        #define Aligned_Alloc(sz, align) _aligned_malloc(sz, align)
        #define Aligned_Realloc(old_sz, ptr, sz, align) _aligned_realloc(ptr, sz, align)
        #define Aligned_Free(ptr) _aligned_free(ptr)
    #endif
#else // Non-MSVC (POSIX / GCC / Clang)
#include <cstdlib> // std::aligned_alloc
// std::aligned_alloc has no realloc counterpart; gp_aligned_realloc emulates it.
#define Aligned_Alloc(sz, align) std::aligned_alloc(align, sz)
#define Aligned_Realloc(old_sz, ptr, sz, align) gp_aligned_realloc(old_sz, ptr, sz, align)
#define Aligned_Free(ptr) std::free(ptr)
#endif
// One tracked live allocation.
struct Allocation {
    s64 size;
    void* memory;
    s32 alignment;
    string label; // copied untracked on Add_Allocation; freed in Remove_Allocation
};
// Global registry of live default-allocator allocations (debug builds only).
struct General_Allocator {
    #if GP_ALLOCATOR_TRACK_ALLOCATIONS
    // NOTE: This is VERY slow, a hashmap is better suited here,
    // but this is just a quick and dirty solution for now.
    Array<Allocation> allocations;
    s64 total_bytes_allocated = 0;
    #endif
};
General_Allocator* get_general_allocator_data();
constexpr u16 default_allocator_default_alignment = 16;
// Malloc-backed general-purpose allocator with optional debug-build tracking.
Allocator default_allocator ();
void* default_allocator_proc (Allocator_Mode mode, s64 requested_size, s64 old_size, void* old_memory, void* allocator_data);
void* default_allocator_new (s64 new_size, s64 alignment=16, bool initialize=true);
void* default_allocator_realloc (s64 old_size, void* old_memory, s64 new_size, s64 alignment=16, bool initialize=true);
void default_allocator_free (void* memory);
// True if `old_memory` is recorded in the tracking table (debug builds only).
bool default_allocator_is_this_yours (void* old_memory);
void default_allocator_initialize_tracking ();
bool default_allocator_tracking_enabled ();
// #TODO: I want to be able to tag any allocations in debug mode.

78
lib/Base/Hash_Functions.h Normal file
View File

@ -0,0 +1,78 @@
constexpr u32 HASH_INIT = 5381;
// sdbm-style rolling hash over an arbitrary byte buffer.
u32 sdbm_hash (void* data, s64 size) {
    u32 h = HASH_INIT;
    u8* bytes = (u8*)data;
    for (s64 i = 0; i < size; i += 1) {
        h = (h << 16) + (h << 6) - h + bytes[i];
    }
    return h;
}
// Multiplicative hashing with Knuth's 64-bit golden-ratio constant.
u64 knuth_hash (u64 x) {
    constexpr u64 KNUTH_GOLDEN_RATIO_64 = 11400714819323198485ULL;
    return x * KNUTH_GOLDEN_RATIO_64;
}
// Fold a Knuth multiplicative hash down to 32 bits by keeping the high word
// (the best-mixed bits). Removed the dead `^ HASH_INIT`: it only touched the
// low 32 bits, which the `>> 32` discards, so it never affected the result.
u32 knuth_hash_u32 (u64 x) {
    return (u32)(knuth_hash(x) >> 32);
}
constexpr u64 FNV_64_PRIME = 0x100000001b3ULL;
constexpr u64 FNV_64_OFFSET_BIAS = 0xcbf29ce484222325ULL;
// One FNV-1a round: xor in the data, then multiply by the prime.
u64 fnv1a_hash (u64 x, u64 h = FNV_64_OFFSET_BIAS) {
    return (h ^ x) * FNV_64_PRIME;
}
// Good for hashing strings.
// Runs the FNV-1a round byte-by-byte over an arbitrary buffer.
u64 fnv1a_hash_any (void* data, s64 size, u64 h = FNV_64_OFFSET_BIAS) {
    u8* bytes = (u8*)data;
    for (s64 i = 0; i < size; i += 1) {
        h = fnv1a_hash(bytes[i], h);
    }
    return h;
}
// Generic Hash_Function for table keys: FNV-1a over the raw key bytes, folded
// to 32 bits with xor so the high bits participate — consistent with
// string_hash_function_fnv1a below (a plain truncating cast, as before,
// discarded the better-mixed upper half).
u32 table_hash_function_fnv1a (void* key, s64 size) {
    u64 h = fnv1a_hash_any(key, size);
    return (u32)(h ^ (h >> 32));
}
// Hash_Function for 8-byte keys via Knuth multiplicative hashing.
u32 table_hash_function_knuth (void* key, s64 size) {
    Assert(size == 8);
    u64 key_value = *(u64*)key;
    return knuth_hash_u32(key_value);
}
// Hash_Function for `string` keys: FNV-1a over the character data.
u32 string_hash_function_fnv1a (void* key, s64 size) {
    Assert(size == sizeof(string));
    string* key_string = (string*)key;
    u64 hash_u64 = fnv1a_hash_any(key_string->data, key_string->count);
    // xor-fold down to 32 bits rather than truncate, so the high bits participate.
    return (u32)(hash_u64 ^ (hash_u64 >> 32));
}
bool u32_keys_match (void* key1, void* key2) {
u32 key1_u32 = *(u32*)key1;
u32 key2_u32 = *(u32*)key2;
return key1_u32 == key2_u32;
}
bool u64_keys_match (void* key1, void* key2) {
u64 key1_u64 = *(u64*)key1;
u64 key2_u64 = *(u64*)key2;
return key1_u64 == key2_u64;
}
bool string_keys_match (void* key1, void* key2) {
string key1_s = *((string*)key1);
string key2_s = *((string*)key2);
return strings_match(key1_s, key2_s);
}

325
lib/Base/Hash_Table.h Normal file
View File

@ -0,0 +1,325 @@
// #NOTE: This Hash Table is borrowed from Jai's implementation! (With some tweaks)
// I made my own version that's arena-backed, but the mechanisms are the same.
typedef u32 hash_result;
// Stored hash values 0 and 1 are reserved: 0 marks an empty slot, 1 a tombstone
// (removed entry). Real hashes are remapped into [2, UINT32_MAX] on insert.
constexpr hash_result HASH_TABLE_FIRST_VALID_HASH = 2;
constexpr hash_result HASH_TABLE_REMOVED_HASH = 1;
typedef hash_result (*Hash_Function)(void* key, s64 size);
typedef bool (*Hash_Compare_Function)(void* key1, void* key2);
// One open-addressed slot: key/value plus the cached (remapped) hash.
template <typename T, typename U>
struct Table_Entry {
    using KeyType = T; using ValueType = U;
    T key;
    U value;
    hash_result hash;
};
// Open-addressed hash table with cached hashes, tombstones, and a growing
// probe increment (1, 2, 3, ... — visits every slot of a power-of-two table).
template <typename T, typename U>
struct Table {
    using KeyType = T; using ValueType = U;
    Allocator allocator = {};
    ArrayView<Table_Entry<T, U>> entries = {};
    s64 allocated = 0;    // slot capacity; always a power of two
    s64 count = 0;        // live entries
    s64 slots_filled = 0; // live entries + tombstones (drives resizing)
    Hash_Function hash_function = nullptr;
    Hash_Compare_Function compare_function = nullptr;
    u32 load_factor_percent = 70;
    s64 add_collisions = 0;
    s64 find_collisions = 0;
    bool refill_removed = true;    // reuse tombstone slots on insert
    bool count_collisions = false; // collision stats are opt-in
};
// A table is usable only after init assigned storage and the caller set both callbacks.
template <typename T, typename U> bool table_is_valid (Table<T, U>* table) {
    return (table != nullptr)
        && (table->entries.data != nullptr)
        && (table->allocated != 0)
        && (table->hash_function != nullptr)
        && (table->compare_function != nullptr);
}
// Allocate `slots_to_allocate` (rounded up to a power of two) empty slots.
// NOTE: hash_function / compare_function are NOT set here — the caller must
// assign them before the table passes table_is_valid().
template <typename T, typename U> void table_init (Table<T, U>* table, s64 slots_to_allocate=64) {
    if (table->allocator.proc == nullptr) {
        table->allocator = context_allocator(); // #remember_allocator
    }
    push_allocator(table->allocator);
    s64 n = Next_Power_Of_Two(slots_to_allocate);
    table->entries = ArrayView<Table_Entry<T, U>>(table->allocator, n, true);
    table->allocated = n;
    for (s64 i = 0; i < n; i += 1) {
        table->entries[i].hash = 0; // 0 == empty slot
    }
    // default hash and compare functions:
    // table->hash_function = table_hash_function_fnv1a;
    // table->compare_function = u64_keys_match;
}
// Adds given key value pair to the table, returns a pointer to the inserted value.
// Duplicate keys are NOT rejected — the probe walk only stops at empty or
// tombstone slots and never compares keys, so adding an existing key stores a
// second entry (use table_set for insert-or-overwrite).
template <typename T, typename U> U* table_add (Table<T, U>* table, T key, U value) {
    Assert(table_is_valid(table));
    Assert(table->load_factor_percent < 100);
    // Grow when slots_filled (live + tombstones) would cross the load factor.
    if ( ((table->slots_filled + 1) * 100) >= (table->allocated * table->load_factor_percent) ) {
        table_resize(table, Next_Power_Of_Two(table->allocated + 64));
    }
    Assert(table->slots_filled < table->allocated);
    // #Walk_Table
    u32 mask = (u32)(table->allocated - 1);
    u32 hash = table->hash_function(&key, sizeof(T));
    if (hash < HASH_TABLE_FIRST_VALID_HASH) {
        hash += HASH_TABLE_FIRST_VALID_HASH; // 0 and 1 are reserved (empty / tombstone)
    }
    u32 index = hash & mask;
    u32 probe_increment = 1;
    u32 table_while_loop = table->entries[index].hash;
    while (table_while_loop) {
        if (table->refill_removed) {
            if (table->entries[index].hash == HASH_TABLE_REMOVED_HASH) {
                table->slots_filled -= 1; // 1 will get re-added below, for total increment 0.
                break;
            }
        }
        if (table->count_collisions) {
            table->add_collisions += 1;
        }
        // Growing increment: triangular probing, covers all slots when the
        // table size is a power of two.
        index = (index + probe_increment) & mask;
        probe_increment += 1;
        table_while_loop = table->entries[index].hash;
    }
    // Walk_Table walked us to an unused entry, so add our new data into this slot:
    table->count += 1;
    table->slots_filled += 1;
    Table_Entry<T, U>* entry = &table->entries[index];
    entry->hash = hash;
    entry->key = key;
    entry->value = value;
    return &entry->value;
}
// Returns a pointer to the value stored for `key`, or nullptr if absent.
// The pointer is invalidated by any table_add that triggers a resize.
template <typename T, typename U> U* table_find_pointer (Table<T, U>* table, T key) {
    Assert(table_is_valid(table));
    if (!table_is_valid(table)) return nullptr;
    // #Walk_Table
    u32 mask = (u32)(table->allocated - 1);
    u32 hash = table->hash_function(&key, sizeof(T));
    if (hash < HASH_TABLE_FIRST_VALID_HASH) {
        hash += HASH_TABLE_FIRST_VALID_HASH;
    }
    u32 index = hash & mask;
    u32 probe_increment = 1;
    u32 table_while_loop = table->entries[index].hash;
    // Probe until an empty slot (hash 0); tombstones keep the chain alive.
    while (table_while_loop) {
        Table_Entry<T, U>* entry = &table->entries[index];
        if (entry->hash == hash) { // cheap cached-hash check before the key compare
            if (table->compare_function(&entry->key, &key)) {
                return &entry->value;
            }
        }
        if (table->count_collisions) { table->find_collisions += 1; }
        index = (index + probe_increment) & mask;
        probe_increment += 1;
        table_while_loop = table->entries[index].hash;
    }
    return nullptr;
}
// Insert-or-overwrite: update the existing entry for `key` if present,
// otherwise add a new one. Returns a pointer to the stored value.
template <typename T, typename U> U* table_set (Table<T, U>* table, T key, U value) {
    if (U* existing = table_find_pointer(table, key)) {
        *existing = value;
        return existing;
    }
    return table_add(table, key, value);
}
// Grow the table to at least `slots_to_allocate` slots (rounded up to a power
// of two) and re-insert every live entry into the new storage.
template <typename T, typename U> void table_resize (Table<T, U>* table, s64 slots_to_allocate) {
    s64 initial_count = table->entries.count;
    Assert(slots_to_allocate > initial_count);
    if (slots_to_allocate <= initial_count) {
        return;
    }
    ArrayView<Table_Entry<T, U>> old_entries = table->entries;
    s64 n = Next_Power_Of_Two(slots_to_allocate);
    table->entries = ArrayView<Table_Entry<T, U>>(table->allocator, n, true);
    table->allocated = n;
    table->count = 0;
    table->slots_filled = 0;
    // Re-insert valid entries (tombstones and empties are dropped).
    // Fix: the comparison must be `>=` — table_add remaps hashes so that
    // HASH_TABLE_FIRST_VALID_HASH itself (2) is a live entry's stored hash
    // (raw hash 0 maps to 2); the previous `>` silently dropped such entries
    // from the table on every resize.
    for (s64 i = 0; i < old_entries.count; i += 1) {
        Table_Entry<T, U> entry = old_entries[i];
        // if entry is valid!
        if (entry.hash >= HASH_TABLE_FIRST_VALID_HASH) {
            table_add(table, entry.key, entry.value);
        }
    }
    // Free old entries
    push_allocator(table->allocator);
    array_free(old_entries);
}
// Empty the table. With keep_memory (default) the slot array is retained and
// all hashes are zeroed; otherwise the storage is freed and the table returns
// to an uninitialized state.
template <typename T, typename U> void table_reset (Table<T, U>* table, bool keep_memory=true) {
    table->count = 0;
    table->slots_filled = 0;
    for (s64 i = 0; i < table->entries.count; i += 1) {
        table->entries[i].hash = 0;
    }
    if (!keep_memory) {
        push_allocator(table->allocator);
        array_free(table->entries);
        // Fix: drop the now-dangling view and capacity so table_is_valid()
        // fails instead of a later use-after-free through stale entries.data.
        table->entries = {};
        table->allocated = 0;
    }
}
// Free the slot storage. In debug builds the whole struct is poisoned
// afterwards so any further use is caught.
template <typename T, typename U> void table_release (Table<T, U>* table) {
    array_free(table->entries);
    #if BUILD_DEBUG
    poison_struct(table);
    #endif
}
template <typename T, typename U> bool table_contains (Table<T, U>* table, T key) {
    // True if `key` currently has an entry in the table.
    if (table_find_pointer(table, key)) return true;
    return false;
}
template <typename T, typename U> bool table_find (Table<T, U>* table, T key, U* value) {
    // Look up `key`; on success copy the stored value into *value and return
    // true. *value is left untouched when the key is absent.
    U* found = table_find_pointer(table, key);
    if (!found) return false;
    (*value) = (*found);
    return true;
}
template <typename T, typename U> bool table_remove (Table<T, U>* table, T key, U* value) {
    // Remove `key`. On success the removed value is copied into *value and
    // true is returned; false if the key is absent. The slot is tombstoned
    // (HASH_TABLE_REMOVED_HASH) so later probe chains still walk across it;
    // slots_filled intentionally stays unchanged.
    Assert(table_is_valid(table));
    if (!table_is_valid(table)) return false; // #FIX: was `return nullptr` from a bool function
    // #Walk_Table
    u32 mask = (u32)(table->allocated - 1);
    u32 hash = table->hash_function(&key, sizeof(T));
    if (hash < HASH_TABLE_FIRST_VALID_HASH) {
        hash += HASH_TABLE_FIRST_VALID_HASH;
    }
    u32 index = hash & mask;
    u32 probe_increment = 1;
    u32 table_while_loop = table->entries[index].hash;
    while (table_while_loop) {
        Table_Entry<T, U>* entry = &table->entries[index];
        if ((entry->hash == hash) && table->compare_function(&entry->key, &key)) {
            entry->hash = HASH_TABLE_REMOVED_HASH;
            table->count -= 1;
            (*value) = entry->value;
            return true;
        }
        index = (index + probe_increment) & mask;
        probe_increment += 1;
        table_while_loop = table->entries[index].hash;
    }
    return false;
}
template <typename T, typename U> ArrayView<U> table_find_multiple (Table<T, U>* table, T key, U* value) {
    // Collect ALL values stored under `key` by walking the full probe chain
    // instead of stopping at the first match (presumably duplicates can be
    // inserted via table_add — confirm).
    // NOTE(review): the `value` out-parameter is never read or written here —
    // results come back solely in the returned view. Candidate for removal at
    // the next interface-breaking opportunity.
    Array<U> results;
    // #NOTE: We should allow setting an allocator instead of defaulting to temp()?
    // results.allocator = temp();
    // #Walk_Table
    u32 mask = (u32)(table->allocated - 1);
    u32 hash = table->hash_function(&key, sizeof(T));
    if (hash < HASH_TABLE_FIRST_VALID_HASH) {
        hash += HASH_TABLE_FIRST_VALID_HASH;
    }
    u32 index = hash & mask;
    u32 probe_increment = 1;
    u32 table_while_loop = table->entries[index].hash;
    while (table_while_loop) {
        Table_Entry<T, U>* entry = &table->entries[index];
        if (entry->hash == hash) {
            if (table->compare_function(&entry->key, &key)) {
                array_add(results, entry->value);
            } else {
                // Same hash, different key — a true hash collision.
                if (table->count_collisions) { table->find_collisions += 1; }
            }
        } else {
            if (table->count_collisions) { table->find_collisions += 1; }
        }
        index = (index + probe_increment) & mask;
        probe_increment += 1;
        table_while_loop = table->entries[index].hash;
    }
    return to_view(results);
}
// find_or_add is kind of like table_set, but used when you just want a pointer to the value, which you can fill in.
template <typename T, typename U> U* table_find_or_add (Table<T, U>* table, T key, bool* newly_added) {
    // Returns a pointer to the value slot for `key`, inserting a zeroed value
    // first if the key was not present. *newly_added reports which case occurred.
    U* slot = table_find_pointer(table, key);
    if (slot != nullptr) {
        (*newly_added) = false;
        return slot;
    }
    (*newly_added) = true;
    U zero_value = {};
    return table_add(table, key, zero_value);
}

32
lib/Base/Logger.cpp Normal file
View File

@ -0,0 +1,32 @@
void log (string fmt, ...) {
    // printf-style formatted logging: format into the thread-local builder,
    // guarantee a trailing newline, hand the message to the context logger,
    // then recycle the builder's memory.
    // NOTE(review): va_start on a non-trivial `string` parameter is formally
    // unspecified; it works with this ABI but is worth confirming.
    String_Builder* sb = thread_context()->log_builder;
    va_list args;
    va_start(args, fmt);
    print_to_builder_internal(sb, fmt, args);
    va_end(args);
    // Append newline if needed. #FIX: guard the empty case — indexing
    // result.data[result.count-1] on an empty message read out of bounds.
    string result = string_view(sb);
    bool ends_with_newline = (result.count > 0) && (result.data[result.count-1] == '\n');
    if (!ends_with_newline) {
        append(sb, "\n");
    }
    string message = string_view(sb);
    Logger* logger = context_logger();
    logger->proc(message, Log_Level::None, logger->data);
    reset_string_builder(sb, true);
}
void print (string message) {
    // Route `message` through the context logger at the default (None) level.
    Logger* l = context_logger();
    l->proc(message, Log_Level::None, l->data);
}
void print_error (string error_message) {
    // Route `error_message` through the context logger at Error level.
    Logger* l = context_logger();
    l->proc(error_message, Log_Level::Error, l->data);
}

70
lib/Base/Logger.h Normal file
View File

@ -0,0 +1,70 @@
// #TODO(Low priority) #Logger module
// [ ] Add colored prints (See: Print_Color.jai)
// See Logger.jai in our jiim-dev-gui project for how to do fancy colored text.
// Severity passed to Logger_Proc. Error and Fatal_Error are routed to stderr
// by the default logger; everything else goes to stdout.
enum class Log_Level : s32 {
    TODO = -2,        // placeholder messages for unfinished code paths
    Trace = -1,       // verbose diagnostics
    None = 0,         // plain output (used by print/log)
    Info = 1,
    Warning = 2,
    Error = 3,
    Fatal_Error = 4,
};
// log_function pointer
typedef void (*Logger_Proc)(string log_message, Log_Level level, void* data);
void default_logger_proc (string log_message, Log_Level level, void* data); //fwd declared
// A logger is just a sink callback plus an opaque user-data pointer that is
// handed back on every call.
struct Logger {
    Logger_Proc proc;
    void* data;
};
// Process-wide fallback logger that writes to the OS standard output/error
// handles, serialized by a mutex. Set up by default_logger_initialize().
struct Default_Logger {
    Logger_Proc proc = default_logger_proc;
    String_Builder* sb = nullptr;
    // #FIX: give every member a default initializer. The global instance was
    // zero-initialized anyway, but any local/heap instance previously carried
    // garbage in `mutex` and the handle fields.
    Mutex* mutex = nullptr;
    #if OS_WINDOWS
    void* windows_standard_output = nullptr;
    void* windows_standard_error = nullptr;
    #endif
};
global Default_Logger default_logger;
// default_logger_proc()...
#if OS_WINDOWS
void os_write_string_unsynchronized(string s, bool to_standard_error) {
    // Write `s` to the cached stdout/stderr handle. Callers must hold the
    // default_logger mutex — this function does no locking itself.
    void* handle = (to_standard_error) ?
        default_logger.windows_standard_error :
        default_logger.windows_standard_output;
    Assert(handle != nullptr);
    // #FIX: the WriteFile result was computed and then ignored. Loop on
    // partial writes (possible for large buffers / redirected pipes) and stop
    // on hard failure — a logger has no good way to report its own error.
    u8* cursor = s.data;
    s64 remaining = s.count;
    while (remaining > 0) {
        u32 written = 0;
        bool ok = (bool)WriteFile(handle, cursor, (u32)remaining, (LPDWORD)&written, nullptr);
        if (!ok || written == 0) break;
        cursor += written;
        remaining -= written;
    }
}
#endif
void default_logger_proc (string log_message, Log_Level level, void* data) {
    // Default sink: Error/Fatal_Error go to stderr, everything else to stdout,
    // under the mutex so concurrent log calls don't interleave.
    // `data` is unused by the default logger.
    bool use_stderr = (level == Log_Level::Error) || (level == Log_Level::Fatal_Error);
    lock(default_logger.mutex);
    os_write_string_unsynchronized(log_message, use_stderr);
    unlock(default_logger.mutex);
}
void default_logger_initialize() {
    // see: Bootstrap_Main_Thread_Context
    // One-time process setup: allocate + init the logging mutex and cache the
    // OS standard output/error handles.
    // NOTE(review): New<Mutex>(true) followed by mutex_init looks like it may
    // initialize the mutex twice — confirm what New's `true` argument does.
    default_logger.mutex = New<Mutex>(true);
    mutex_init(default_logger.mutex);
    #if OS_WINDOWS
    default_logger.windows_standard_output = GetStdHandle(STD_OUTPUT_HANDLE);
    default_logger.windows_standard_error = GetStdHandle(STD_ERROR_HANDLE);
    #endif
}
void log (string fmt, ...);
void print (string message);
void print_error (string error_message);

52
lib/Base/New_String.cpp Normal file
View File

@ -0,0 +1,52 @@
/*
#define SB_HELPER() \
if (sb->allocated < max_array_size(*sb)) { \
array_reserve(*sb, max_array_size(*sb)); \
} \
s64 buffer_size = sb->allocated - sb->count;\
u8* current_point = &sb->data[sb->count]
// This is not super flexible because we cannot do leading zeros or align...
template <typename T> // Base-10
string int_to_string(String_Builder* sb, T value, s32 width, bool is_signed) {
SB_HELPER();
string result;
bool negative = false;
u8 temporary_buffer[32];
s32 position = 0;
u64 v = 0;
if (is_signed && value < 0) {
negative = true;
// convert to unsigned magnitude safely
v = (u64)(-(s64)value);
} else {
v = (u64)value;
}
// Special case 0:
if (v == 0) {
}
}
force_inline void print_to_builder (String_Builder* sb, u8 v) {
SB_HELPER();
return int_to_string(v, 8, is_signed=false);
}
force_inline void print_to_builder (String_Builder* sb, string v) {
sb->count += v.count;
}
// fallback:
template <typename T>
force_inline string print_to_builder (String_Builder* sb, T&) {
return string("[unsupported type for to_builder conversion!]");
}
*/

142
lib/Base/RadixSort.cpp Normal file
View File

@ -0,0 +1,142 @@
// Two-buffer LSB radix sorter (in the style of Terdiman's radix sort): sorts
// an array of indices ("ranks") rather than moving the items themselves.
// `ranks` holds the current ordering, `ranks2` is the scratch buffer the next
// pass scatters into; the two are swapped after every pass.
struct RadixSort {
    ArrayView<u32> ranks;
    ArrayView<u32> ranks2;
    Allocator allocator;    // captured at init so free uses the same source
    bool valid_ranks;       // true once `ranks` holds a meaningful ordering
};
void radix_sort_init (RadixSort* r, u32 items_to_allocate) {
    // Allocate both rank buffers for `items_to_allocate` elements. If the
    // sorter has no allocator yet, capture the current context allocator so
    // radix_sort_free can release from the same source later.
    if (r->allocator.proc == nullptr) {
        r->allocator = context_allocator();
    }
    push_allocator(r->allocator);
    r->ranks = ArrayView<u32>(items_to_allocate);
    r->ranks2 = ArrayView<u32>(items_to_allocate);
    r->valid_ranks = false;
}
void radix_sort_free (RadixSort* r) {
    // Release both rank buffers using the allocator captured at init time.
    Assert(r->allocator.proc != nullptr);
    push_allocator(r->allocator);
    array_free(r->ranks2);
    array_free(r->ranks);
}
// #rename copy_indices?
ArrayView<u32> copy_ranks (RadixSort* r) {
    // Return a newly-allocated copy of the current sorted index array.
    ArrayView<u32> duplicate = array_copy(r->ranks);
    return duplicate;
}
// RadixSort provides an array of indices in sorted order.
u32 rank (RadixSort* r, s64 i) {
    // Fetch the i-th sorted index; debug builds break on out-of-range i.
    Assert(r != nullptr);
    #if ARRAY_ENABLE_BOUNDS_CHECKING
    bool out_of_bounds = (i < 0) || (i >= r->ranks.count);
    if (out_of_bounds) { debug_break(); /*INDEX OOB*/ }
    #endif
    return r->ranks[i];
}
template <typename T> void create_histograms (RadixSort* r, T* buffer, u32 count, u32* histogram) {
    // Build one 256-entry byte histogram per byte of T in a single pass over
    // the input. `histogram` must point at 256 * sizeof(T) zero-initialized
    // u32s. `r` is unused; kept for signature consistency with the passes.
    constexpr u32 bucket_count = sizeof(T);
    // Init bucket pointers:
    u32* h[bucket_count] = {};
    for (u32 i = 0; i < bucket_count; i += 1) {
        h[i] = histogram + (256 * i);
    }
    // Count every byte of every element. #FIX: use `if constexpr` (this file's
    // old "how to make compile time if?" question) so the unused branches are
    // discarded at compile time — with a runtime `if`, instantiating this for
    // a 1- or 2-byte T still compiles constant out-of-bounds indexes like
    // h[2]/h[3] into dead code.
    u8* p = (u8*)buffer;
    u8* pe = (p + count * sizeof(T));
    while (p != pe) {
        h[0][*p] += 1; p += 1;
        if constexpr (bucket_count > 1) {
            h[1][*p] += 1; p += 1;
            if constexpr (bucket_count > 2) {
                h[2][*p] += 1; p += 1;
                h[3][*p] += 1; p += 1;
                if constexpr (bucket_count == 8) {
                    h[4][*p] += 1; p += 1;
                    h[5][*p] += 1; p += 1;
                    h[6][*p] += 1; p += 1;
                    h[7][*p] += 1; p += 1;
                }
            }
        }
    }
}
template <typename T> void radix_sort (RadixSort* r, T* input, u32 count) {
    // LSB-first byte-wise radix sort producing a sorted index ordering in
    // r->ranks (input itself is never moved). One counting pass
    // (create_histograms), then one stable scatter pass per byte of T.
    // Assumes little-endian layout and r->ranks/ranks2 sized >= count.
    constexpr u32 T_SIZE = sizeof(T);
    // Allocate histograms & offsets on the stack:
    u32 histogram [256 * T_SIZE] = {};
    u32* link [256];
    create_histograms(r, input, count, histogram);
    // Radix sort, j is the pass number, (0 = LSB, P = MSB)
    for (u32 j = 0; j < T_SIZE; j += 1) {
        u32* h = &histogram[j * 256];
        u8* input_bytes = (u8*)input;
        input_bytes += j; // Assumes little endian!
        // If the first element's byte bucket holds all `count` entries, every
        // element has the same byte in this position — the pass is a no-op.
        if (h[input_bytes[0]] == count) {
            continue;
        }
        // Create offsets: link[b] = write cursor for bucket b in the output.
        link[0] = r->ranks2.data;
        for (u32 i = 1; i < 256; i += 1) { // 1..255
            link[i] = link[i-1] + h[i-1];
        }
        // Perform Radix Sort
        if (!r->valid_ranks) {
            // First effective pass: scatter the raw indices 0..count-1.
            for (u32 i = 0; i < count; i += 1) {
                *link[input_bytes[i*T_SIZE]] = i;
                link[input_bytes[i*T_SIZE]] += 1;
            }
            r->valid_ranks = true;
        } else {
            // Later passes permute the previous ordering; the scatter is
            // stable, so lower-byte order is preserved within each bucket.
            for (u32 i = 0; i < count; i += 1) {
                u32 idx = r->ranks[i];
                *link[input_bytes[idx*T_SIZE]] = idx;
                link[input_bytes[idx*T_SIZE]] += 1;
            }
        }
        // Swap pointers for next pass. Valid indices - the most recent ones - are in ranks after the swap.
        ArrayView<u32> ranks2_temp = r->ranks2;
        r->ranks2 = r->ranks;
        r->ranks = ranks2_temp;
    }
    // All values were equal; generate linear ranks
    if (!r->valid_ranks) {
        for (u32 i = 0; i < count; i += 1) {
            r->ranks[i] = i;
            r->valid_ranks = true;
        }
    }
}
// NOTE: For a small number of elements it's more efficient to use insertion sort
void radix_sort_u64 (RadixSort* r, u64* input, u32 count) {
    // Sort `count` u64 keys; the sorted order is left in r->ranks.
    if (input == nullptr || count == 0) return;
    // #FIX: re-initialize when the existing buffers are too SMALL, not only
    // when they are absent — sorting more items than were allocated at init
    // time previously overran ranks/ranks2.
    if (r->ranks.count < (s64)count) {
        if (r->ranks.count > 0) {
            radix_sort_free(r);
        }
        radix_sort_init(r, count);
    }
    radix_sort(r, input, count);
}
// #StringRadixSort
void radix_sort_strings (RadixSort* r, ArrayView<string> strings) {
    // #TODO: unfinished stub — only the length array is allocated; nothing is
    // sorted yet and `lengths` is currently never freed or used.
    // 1. sort by length first
    ArrayView<u32> lengths = ArrayView<u32>(strings.count);
    // 2.
}

165
lib/Base/Serializer.h Normal file
View File

@ -0,0 +1,165 @@
typedef ArenaArray<u8> Serializer;
force_inline Serializer* new_serializer (Arena_Reserve new_reserve, string label="") {
string serializer_label = format_string(temp(), "new_serializer: (%s)", label.data);
return arena_array_new<u8>(1, new_reserve, serializer_label);
}
force_inline void reset_serializer (Serializer* serializer) {
    // Rewind to empty while keeping the committed memory. Debug builds poison
    // the previously-written bytes so stale reads are caught.
    #if BUILD_DEBUG
    array_poison_range(*serializer, 0, serializer->count);
    #endif
    reset_keeping_memory(*serializer);
}
force_inline void free_serializer (Serializer* serializer) {
    // Release the serializer's arena storage.
    arena_array_free(*serializer);
}
// force_inline ArrayView<u8> to_view (Serializer* serializer);
// #redundant, just call to_view (ArenaArray<T>&)
force_inline s64 count_bytes (Serializer* serializer) {
    // Number of bytes written so far.
    s64 bytes_written = serializer->count;
    return bytes_written;
}
template <typename T> force_inline void Add (Serializer* serializer, T item) {
    // Append the raw bytes of `item` to the stream.
    // #FIX: grow based on the bytes actually needed (count + sizeof(T)) —
    // the old `allocated + sizeof(T)` target was always larger than
    // `allocated`, so every Add reserved again. Also take the write pointer
    // only AFTER any potential growth, in case reserving relocates storage.
    s64 final_count = serializer->count + sizeof(T);
    if (serializer->allocated < final_count) {
        array_reserve(*serializer, final_count);
    }
    u8* current_point = &serializer->data[serializer->count];
    memcpy(current_point, &item, sizeof(T));
    serializer->count += sizeof(T);
}
template <typename T> force_inline void AddArray_NoSize (Serializer* serializer, ArrayView<T> view) {
    // Append the raw bytes of `view` with no length prefix (pairs with
    // ReadArray/ReadArrayView, where the caller supplies the count).
    // #FIX: grow based on count + payload bytes (the old `allocated + ...`
    // target always triggered a reserve), and take the write pointer only
    // after any potential growth.
    s64 final_count = serializer->count + (view.count * sizeof(T));
    if (serializer->allocated < final_count) {
        array_reserve(*serializer, final_count);
    }
    u8* current_point = &serializer->data[serializer->count];
    memcpy(current_point, view.data, view.count * sizeof(T));
    serializer->count += view.count * sizeof(T);
}
template <typename T> force_inline void AddArray (Serializer* serializer, ArrayView<T> view) {
    // Write the element count (s64) followed by the raw element bytes.
    // Inverse of ReadSizedArray.
    Add(serializer, view.count);
    AddArray_NoSize(serializer, view);
}
force_inline void AddString (Serializer* serializer, string s) {
    // Write the byte count (s64) followed by the string bytes. Inverse of ReadString.
    Add(serializer, s.count);
    AddArray_NoSize(serializer, to_view(s));
}
force_inline void AddString32 (Serializer* serializer, string s) {
    // Write the length as u32, then the bytes. Inverse of ReadString32.
    // #FIX: assert the length fits rather than silently truncating the cast.
    Assert(s.count >= 0 && s.count <= (s64)0xFFFFFFFF);
    u32 string_length = (u32)s.count;
    Add(serializer, string_length);
    AddArray_NoSize(serializer, to_view(s));
}
force_inline void AddString16 (Serializer* serializer, string s) {
    // Write the length as u16, then the bytes. Inverse of ReadString16.
    // #FIX: assert the length fits rather than silently truncating the cast.
    Assert(s.count >= 0 && s.count <= 0xFFFF);
    u16 string_length = (u16)s.count;
    Add(serializer, string_length);
    AddArray_NoSize(serializer, to_view(s));
}
struct Deserializer { // #downcasts to ArrayView<u8>
    s64 count;   // total bytes available
    u8* data;    // borrowed — the deserializer does not own the buffer
    s64 cursor;  // read position, advanced by every Read*
    Deserializer (ArrayView<u8> view) {
        count = view.count;
        data = view.data;
        cursor = 0;
    }
};
template <typename T> force_inline void Read (Deserializer* ds, T* item) {
    // Copy sizeof(T) bytes from the cursor into *item and advance.
    // NOTE(review): no bounds check against ds->count — a truncated or
    // corrupt buffer reads past the end.
    u8* current_point = &ds->data[ds->cursor];
    memcpy(item, current_point, sizeof(T));
    ds->cursor += sizeof(T);
}
template <typename T> force_inline void ReadArrayView (Deserializer* ds, ArrayView<T>& view, s64 view_count) {
    // Point `view` directly into the deserializer's buffer (zero-copy) and
    // advance past it. The view dangles once the buffer is freed.
    view.count = view_count;
    view.data = (T*)&ds->data[ds->cursor];
    ds->cursor += (view_count * sizeof(T));
}
// Here array should be allocated and have a non-zero count!
template <typename T> force_inline void ReadArray (Deserializer* ds, ArrayView<T> view) {
    // Copy view.count elements out of the buffer into caller-provided storage
    // and advance the cursor.
    u8* current_point = &ds->data[ds->cursor];
    memcpy(view.data, current_point, view.count * sizeof(T));
    ds->cursor += view.count * sizeof(T);
}
template <typename T> force_inline ArrayView<T> ReadSizedArray (Deserializer* ds) { // #allocates
    // Read an s64 count, allocate storage, then copy the elements.
    // Inverse of AddArray.
    ArrayView<T> array;
    Read(ds, &array.count);
    array.data = NewArray<T>(array.count, false);
    ReadArray(ds, array);
    return array;
}
template <typename T> void ReadToArenaArray (Deserializer* ds, ArenaArray<T>* aa) {
    // Read a count-prefixed array and copy it into `aa` (the intermediate view
    // is zero-copy into the deserializer's buffer; copy_from_view does the
    // actual copy). Inverse of AddArray.
    // #FIX(idiom): local renamed from `temp`, which shadowed the temp()
    // allocator helper used throughout this codebase.
    ArrayView<T> view_in_buffer;
    Read(ds, &view_in_buffer.count);
    ReadArrayView(ds, view_in_buffer, view_in_buffer.count);
    copy_from_view(aa, view_in_buffer);
}
force_inline void ReadStringView (Deserializer* ds, string& sv, s64 view_count) {
    // Point `sv` at view_count bytes inside the buffer (zero-copy) and advance.
    sv.count = view_count;
    sv.data = &ds->data[ds->cursor];
    ds->cursor += view_count;
}
force_inline void ReadString (Deserializer* ds, string& s) { // #no_alloc
    // Inverse of AddString: s64 length prefix, then a zero-copy view of the bytes.
    Read(ds, &s.count);
    ReadStringView(ds, s, s.count);
}
force_inline void ReadString32 (Deserializer* ds, string& s) { // #no_alloc
    // Inverse of AddString32: u32 length prefix, zero-copy view.
    u32 str_len = 0;
    Read(ds, &str_len);
    ReadStringView(ds, s, (s64)str_len);
}
force_inline void ReadString16 (Deserializer* ds, string& s) { // #no_alloc
    // Inverse of AddString16: u16 length prefix, zero-copy view.
    u16 str_len = 0;
    Read(ds, &str_len);
    ReadStringView(ds, s, (s64)str_len);
}
// This is specialized for filesystem storage of strings.
force_inline u32 AddString_NoCount (Serializer* serializer, u8* data, s16 count) {
    // Append `count` raw bytes with no length prefix; returns the stream
    // offset at which the bytes were written.
    // #FIX: reserve based on the bytes actually needed (the old
    // `allocated + ...` target always triggered a reserve), and take the
    // write pointer only after any potential growth.
    u32 original_count = (u32)serializer->count;
    s64 final_count = serializer->count + (count * sizeof(u8));
    if (serializer->allocated < final_count) {
        array_reserve(*serializer, final_count);
    }
    u8* current_point = &serializer->data[original_count];
    memcpy(current_point, data, count * sizeof(u8));
    serializer->count += count * sizeof(u8);
    return original_count;
}

564
lib/Base/String.cpp Normal file
View File

@ -0,0 +1,564 @@
// #NOTE: All string building, printing and copying operations SHOULD null-terminate the
// strings for backwards compatibility reasons. #FIX if something doesn't follow this rule!
bool is_valid (string s) {
    // A string is valid when it points at data and has a positive length.
    if (s.data == nullptr) return false;
    return s.count > 0;
}
bool is_c_string (string s) {
    // True if the byte just past the view is a null terminator.
    // NOTE(review): this deliberately reads s.data[s.count], one byte beyond
    // the view — only safe on strings produced by the copy/format paths in
    // this file, which always null-terminate.
    return (s.data && s.data[s.count] == '\0');
}
u8* to_c_string (string s) {
    // Allocate a null-terminated copy of `s` (caller owns/frees the result).
    s64 n = s.count;
    u8* result = (u8*)internal_alloc(n + 1);
    memcpy(result, s.data, n);
    result[n] = '\0';
    return result;
}
string copy_string (string s) {
    // Allocate and return a null-terminated copy of `s`. Empty/invalid input
    // yields the empty string without allocating.
    // Assert(s.count > 0);
    if (s.count <= 0) return "";
    string copy = {};
    copy.count = s.count;
    copy.data = (u8*)internal_alloc(s.count + 1);
    memcpy(copy.data, s.data, s.count);
    copy.data[copy.count] = '\0'; // keep the file-wide C-string compatibility guarantee
    return copy;
}
force_inline string copy_string (Allocator allocator, string s) {
    // Same as copy_string(s), but with an explicit allocator pushed for the copy.
    Assert(allocator.proc != nullptr);
    push_allocator(allocator);
    return copy_string(s);
}
// we should have a global atomically incrementing for "untracked" and "no context" allocations.
string copy_string_untracked (string s) { // #unsafe
    // Null-terminated copy taken straight from the system aligned allocator,
    // bypassing context allocation tracking. Pair with string_free_untracked.
    if (s.count <= 0) return "";
    string copy = {};
    copy.count = s.count;
    copy.data = (u8*)Aligned_Alloc(s.count + 1, default_allocator_default_alignment);
    memcpy(copy.data, s.data, s.count);
    copy.data[copy.count] = '\0'; // keep the C-string compatibility guarantee
    return copy;
}
void string_free_untracked (string s) { // #unsafe
    // Free a string produced by copy_string_untracked.
    Aligned_Free(s.data);
}
string copy_string_no_context (string s) {
    // Null-terminated copy via the default allocator — usable before a thread
    // context exists. Pair with string_free_no_context.
    if (s.count <= 0) return "";
    string copy = {};
    copy.count = s.count;
    copy.data = (u8*)default_allocator_new(s.count + 1);
    memcpy(copy.data, s.data, s.count);
    copy.data[copy.count] = '\0'; // keep the C-string compatibility guarantee
    return copy;
}
string copy_string (char* c_string) {
    // Allocate a null-terminated `string` copy of a C string.
    // #FIX(robustness): tolerate a null pointer instead of crashing in strlen.
    string str = {};
    if (c_string == nullptr) return "";
    s64 string_length = strlen(c_string);
    if (string_length == 0)
        return "";
    str.data = NewArray<u8>(string_length + 1);
    memcpy(str.data, c_string, string_length);
    str.count = string_length;
    str.data[str.count] = '\0'; // keep the C-string compatibility guarantee
    return str;
}
string to_string (ArrayView<u8> str) {
    // Reinterpret a byte view as a string (no copy, no ownership change).
    string result = {str.count, str.data};
    return result;
}
ArrayView<u8> to_view (string s) {
    // Reinterpret a string as a byte view (no copy, no ownership change).
    ArrayView<u8> result = {s.count, s.data};
    return result;
}
void string_free (string& s) {
    // Free a context-allocated string and clear it to the empty state.
    internal_free(s.data);
    s = {};
}
void string_free_no_context (string& s) {
    // Free a default-allocator string and clear it to the empty state.
    default_allocator_free(s.data);
    s = {};
}
force_inline string string_view (string s, s64 start_index, s64 view_count) {
    // Return a non-owning view of up to `view_count` bytes of `s` starting at
    // `start_index`, clamped to the end of `s`. Invalid arguments yield "".
    Assert(view_count >= 0); Assert(start_index >= 0);
    if (view_count < 0 || start_index < 0 || start_index >= s.count) return "";
    s64 available = s.count - start_index;
    s64 clamped_count = (view_count > available) ? available : view_count;
    return { clamped_count, s.data + start_index };
}
bool strings_match (string first_string, string second_string) {
    // Named wrapper over operator== (byte-wise comparison).
    bool equal = (first_string == second_string);
    return equal;
}
// #Unicode
string wide_to_utf8 (u16* source, s32 length) {
    // Convert UTF-16 (`length` wide chars, or -1 for null-terminated input)
    // to a freshly allocated, null-terminated UTF-8 string. Returns the empty
    // string on failure.
    if (length == 0) return { };
    s32 query_result = WideCharToMultiByte(CP_UTF8, 0, (LPCWCH)source, length,
                                           nullptr, 0, nullptr, nullptr);
    if (query_result <= 0) return { };
    // With an explicit length the query does NOT include a terminator; make room:
    if (length != -1) {
        query_result += 1;
    }
    u8* memory = NewArray<u8>(query_result);
    string utf8_string;
    utf8_string.count = query_result - 1; // null terminator is not counted
    utf8_string.data = memory;
    s32 result = WideCharToMultiByte(CP_UTF8, 0, (LPCWCH)source, length,
                                     (LPSTR)memory, query_result, nullptr, nullptr);
    if (result <= 0) {
        internal_free(memory);
        return { };
    }
    // #FIX: with an explicit length the API writes only the converted bytes,
    // leaving the final reserved byte uninitialized — terminate explicitly to
    // uphold this file's null-termination guarantee. (Harmless rewrite of the
    // terminator in the length == -1 case.)
    memory[utf8_string.count] = 0;
    return utf8_string;
}
force_inline string wide_to_utf8 (wstring wstr) {
    // Convenience overload for wstring.
    // NOTE(review): wstring's s32 constructor stores `count` in BYTES, not
    // wide chars (see wstring in String.h) — if `wstr` came from
    // utf8_to_wide, passing count here as the wide-char length looks wrong
    // (2x too long, terminator included). Confirm the intended unit of
    // wstring.count before relying on this overload.
    return wide_to_utf8(wstr.data, (s32)wstr.count);
}
wstring utf8_to_wide (string source) {
    // Convert UTF-8 to a freshly allocated, null-terminated UTF-16 string.
    // Returns the empty wstring on failure.
    // NOTE(review): the returned wstring's `count` is set by the s32
    // constructor, which stores BYTES including the terminator — callers
    // should confirm which unit they expect.
    if (!source) return {};
    s32 query_num_chars = MultiByteToWideChar(CP_UTF8, 0,
        (LPCCH)source.data, (s32)source.count, // @Robustness: Silent failure if too long. @Cleanup.
        nullptr, 0);
    if (query_num_chars <= 0) return {};
    wstring name_u16s = wstring(query_num_chars);
    s32 result_num_chars = MultiByteToWideChar(CP_UTF8, 0,
        (LPCCH)source.data, (s32)source.count, // @Robustness: Silent failure if too long. @Cleanup.
        (LPWSTR)name_u16s.data, query_num_chars);
    if (!result_num_chars) {
        internal_free(name_u16s.data);
        return {};
    }
    Assert(result_num_chars <= query_num_chars);
    name_u16s.data[result_num_chars] = 0; // null terminate (ctor allocated length+1 u16s)
    return name_u16s;
}
force_inline string format_string_internal (string format, va_list args) {
    // vsnprintf-based formatter: measure, allocate (+1 for the terminator),
    // then print. Falls back to the default allocator when called without a
    // thread context (e.g. during early startup).
    // #FIX: a va_list may not be traversed twice — copy it for the measuring
    // pass; reusing a consumed list in the second vsnprintf is undefined
    // behavior (and crashes on x86-64 SysV among others).
    va_list args_copy;
    va_copy(args_copy, args);
    string str = {};
    str.count = (s64)vsnprintf(nullptr, (u64)0, (char*)format.data, args_copy);
    va_end(args_copy);
    if (thread_context() != nullptr) {
        str.data = NewArray<u8>(str.count + 1);
    } else {
        fprintf(stderr, "[Warning] Calling `format_string` without a valid context; falling back to default_allocator\n");
        str.data = (u8*)default_allocator_new(str.count + 1);
    }
    // Note that vsnprintf always produces a null-terminated result!
    str.count = (s64)vsnprintf((char*)str.data, (size_t)(str.count + 1), (char*)format.data, args);
    return str;
}
string format_string (Allocator allocator, char* format, ...) { // #sprint
    // Formatted allocation with an explicit allocator. Must be called with a
    // valid thread context and a valid allocator.
    Assert(thread_context() != nullptr);
    Assert(allocator.proc != nullptr);
    push_allocator(allocator);
    va_list args;
    va_start(args, format);
    string formatted = format_string_internal(format, args);
    va_end(args);
    return formatted;
}
string format_string (char* format, ...) { // #sprint
    // Formatted allocation using the current context allocator.
    va_list args;
    va_start(args, format);
    string formatted = format_string_internal(format, args);
    va_end(args);
    return formatted;
}
force_inline String_Builder* new_string_builder (Arena_Reserve new_reserve, string label) {
    // A String_Builder is just a byte ArenaArray; `label` is for allocation tracking.
    return arena_array_new<u8>(1, new_reserve, label);
}
force_inline void append (String_Builder* sb, string s) {
    // Append the bytes of `s` to the builder.
    array_add(*sb, ArrayView<u8>(s.count, s.data));
}
void append (String_Builder* sb, ArrayView<string> strings) {
    // Append every string in `strings`, reserving the combined length up
    // front so the builder grows at most once.
    s64 total = 0;
    for (s64 i = 0; i < strings.count; i += 1) {
        total += strings[i].count;
    }
    s64 needed = sb->count + total;
    if (sb->allocated < needed) {
        array_reserve(*sb, needed);
    }
    for (s64 i = 0; i < strings.count; i += 1) {
        array_add(*sb, ArrayView<u8>(strings[i].count, strings[i].data));
    }
}
force_inline void append_no_add (String_Builder* sb, string s) {
    // Write `s` into the builder's buffer WITHOUT advancing count — used to
    // plant bytes (e.g. a null terminator) just past the live data. The bytes
    // stay valid only until the next append overwrites them.
    array_add(*sb, ArrayView<u8>(s.count, s.data));
    sb->count -= s.count;
}
// Unfortunately this follows the printf format, which is annoying.
// I'd rather have something like fmt::
void print_to_builder_internal (String_Builder* sb, string format, va_list args) {
    // vsnprintf directly into the builder's spare capacity, after growing the
    // builder to its maximum size.
    s64 expected_final_count = max_array_size(*sb);// amount to reserve
    if (sb->allocated < expected_final_count) {
        array_reserve(*sb, expected_final_count);
    }
    s64 buffer_size = sb->allocated - sb->count; // available space
    u8* current_point = &sb->data[sb->count];
    s64 print_count = (s64)vsnprintf((char*)current_point, (size_t)buffer_size, (char*)format.data, args);
    // NOTE(review): vsnprintf returns the WOULD-BE length on truncation; if a
    // message ever exceeds buffer_size, count advances past the bytes
    // actually written. Presumably max_array_size makes that unreachable —
    // confirm.
    // maybe truncate ArenaArray to size so we're not committing a large block of memory?
    // This could be quite slow, so we just hold onto the memory.
    sb->count += print_count;
}
void print_to_builder (String_Builder* sb, string format, ...) {
    // Variadic front-end for print_to_builder_internal.
    // #FIX(consistency): delegate instead of duplicating the reserve +
    // vsnprintf logic, so the two code paths cannot drift apart.
    va_list args;
    va_start(args, format);
    print_to_builder_internal(sb, format, args);
    va_end(args);
}
string string_view (String_Builder* sb) {
    // View of the builder's current contents, with a '\0' planted one byte
    // past the live data (count is NOT advanced) so the view is also C-string
    // safe until the next append.
    // #FIX: `append_no_add(sb, "\0")` went through the strlen constructor and
    // produced a zero-length string, so no terminator was ever written — pass
    // an explicit one-byte string instead.
    append_no_add(sb, string(1, (char*)"\0")); // doesn't increment sb.count
    return to_string(to_view(*sb));
}
// for when we want to keep the string builder around and recycle the memory.
internal force_inline void reset_string_builder (String_Builder* sb, bool keep_memory) {
    // Empty the builder; with keep_memory the committed storage is retained.
    // NOTE(review): unlike reset_serializer, the poison call here is not
    // guarded by BUILD_DEBUG — confirm array_poison_range compiles out in
    // release builds.
    array_poison_range(*sb, 0, sb->count);
    if (keep_memory) {
        reset_keeping_memory(*sb);
    } else {
        array_reset(*sb);
    }
}
force_inline string builder_to_string (String_Builder* sb) { // #copy_string
    // Allocate an independent, null-terminated copy of the builder contents.
    string final_string = copy_string(to_string(to_view(*sb)));
    return final_string;
}
internal force_inline void free_string_builder (String_Builder* sb) {
    // Release the builder's arena storage.
    arena_array_free(*sb);
}
char is_ascii_letter (char c) {
    // Nonzero (1) when c is A-Z or a-z, 0 otherwise.
    bool upper = (c >= 'A' && c <= 'Z');
    bool lower = (c >= 'a' && c <= 'z');
    return upper || lower;
}
char to_lower_ascii (char c) {
    // Lowercase A-Z; every other byte passes through unchanged.
    if (c < 'A' || c > 'Z') return c;
    return (char)(c + ('a' - 'A'));
}
char to_upper_ascii (char c) {
    // Uppercase a-z; every other byte passes through unchanged.
    if (c < 'a' || c > 'z') return c;
    return (char)(c - ('a' - 'A'));
}
force_inline bool is_upper_ascii (char c) {
    // True for ASCII 'A'..'Z'.
    return (c >= 'A' && c <= 'Z');
}
force_inline bool is_lower_ascii (char c) {
    // True for ASCII 'a'..'z'.
    return (c >= 'a' && c <= 'z');
}
// string to_lower_in_place (string s) { }
// Input must be ascii or utf8!
string to_lower_copy (string s_orig) {
    // Return a lowercased copy; only ASCII letters fold, so UTF-8 multi-byte
    // sequences (high-bit bytes) pass through untouched.
    string result = copy_string(s_orig);
    for (s64 index = 0; index < result.count; index += 1) {
        result.data[index] = to_lower_ascii(result.data[index]);
    }
    return result;
}
#define format_cstring(fmt, ...) \
(char*)format_string(fmt, ##__VA_ARGS__).data
bool is_any (u8 c, string chars) {
    // True if byte `c` occurs anywhere in `chars`.
    for (s64 i = 0; i < chars.count; i += 1) {
        if (chars.data[i] == c) return true;
    }
    return false;
}
string trim_right (string s, string chars, bool replace_with_zeros) {
    // View of `s` with trailing bytes in `chars` removed. When
    // replace_with_zeros is set, the trimmed bytes are ALSO overwritten with
    // 0 in the underlying buffer — i.e. this MUTATES the input's memory.
    s64 count = s.count;
    for_each_reverse(i, s) {
        if (is_any(s.data[i], chars)) {
            if (replace_with_zeros) {
                s.data[i] = 0;
            }
            count -= 1;
        } else {
            break; // stop at the first non-trimmed byte from the right
        }
    }
    return string_view(s, 0, count);
}
string trim_left (string s, string chars) {
    // View of `s` with leading bytes in `chars` removed (no mutation).
    s64 skipped = 0;
    while (skipped < s.count && is_any(s.data[skipped], chars)) {
        skipped += 1;
    }
    return string_view(s, skipped, s.count - skipped);
}
string trim (string s, string chars) {
    // View of `s` with both leading and trailing `chars` removed; never
    // mutates the underlying bytes (trim_right is called without zeroing).
    return trim_right(trim_left(s, chars), chars, false);
}
force_inline u32 ctz32(u32 x) {
    // Count trailing zero bits.
    // NOTE(review): callers must guarantee x != 0 — __builtin_ctz(0) is
    // undefined, and _tzcnt_u32 requires BMI1 (returns 32 for 0).
    // find_index_from_left only calls this with a nonzero mask.
    #if COMPILER_MSVC
    return _tzcnt_u32(x);
    #else
    return __builtin_ctz(x);
    #endif
}
s64 find_index_of_any_from_right (string s, string bytes) {
    // Index of the LAST byte of `s` that occurs in `bytes`, or -1 if none.
    for (s64 i = s.count - 1; i >= 0; i -= 1) {
        if (is_any(s[i], bytes)) {
            return i;
        }
    }
    return -1;
}
s64 find_index_from_left (string s, u8 c, s64 start_offset) {
    // Scan forward for byte `c` starting at `start_offset`. The returned
    // index is relative to the START of s.data (not start_offset), or -1 if
    // not found. AVX2 path scans 32 bytes at a time; scalar tail finishes.
    u8* start_p = s.data + start_offset;
    s64 len = s.count - start_offset;
    __m256i vneedle = _mm256_set1_epi8((char)c);
    /** AVX-512BW: __m512i v = _mm512_loadu_si512(p); __mmask64 k = _mm512_cmpeq_epi8_mask(v, vneedle); if (k) return p + _tzcnt_u64(k); */
    s64 i = 0;
    // #FIX: the vector loop must be bounded by `len` (bytes remaining after
    // start_offset), not s.count — with a nonzero start_offset the old bound
    // read up to start_offset bytes past the end of the string and could
    // report "matches" beyond it.
    for (; i + 32 <= len; i += 32) {
        // Unaligned 32-byte load; compare every lane against the needle.
        __m256i v = _mm256_loadu_si256((__m256i*)(start_p + i));
        __m256i cmp = _mm256_cmpeq_epi8(v, vneedle);
        // One mask bit per lane; lowest set bit = first match in this chunk.
        u32 mask = _mm256_movemask_epi8(cmp);
        if (mask) { return start_offset + i + ctz32(mask); }
    }
    // scalar tail:
    for (; i < len; i += 1) {
        if (start_p[i] == c) return start_offset + i;
    }
    return -1;
}
internal bool split_by_char (string s, char c, string* a, string* b) {
    // Split `s` at the first occurrence of `c`: *a = part before, *b = part
    // after (separator excluded). Returns false — with *a = s, *b = {} —
    // when `c` does not occur. Both pieces are trim()'d of surrounding
    // whitespace (string_split inherits this behavior).
    s64 index = find_index_from_left(s, c);
    if (index == -1) {
        (*a) = s;
        (*b) = {};
        return false;
    }
    (*a) = trim({index, s.data});
    // #FIX: the tail after the separator has s.count - index - 1 bytes; the
    // old count (s.count - index) included one byte past the end of `s`.
    (*b) = trim({s.count - index - 1, s.data + index + 1});
    return true;
}
ArrayView<string> string_split (string s, u8 c) {
    // Split `s` on every occurrence of byte `c`, returning views into the
    // original data (no copies; pieces are whitespace-trimmed by
    // split_by_char). A trailing separator produces a final empty piece so
    // the result is unambiguous and join-invertible.
    Array<string> results;
    string remainder = s;
    while (remainder.count) {
        string left; string right;
        bool found = split_by_char(remainder, c, &left, &right);
        if (found) {
            array_add(results, left);
        } else {
            // No separator left: the remainder is the final piece.
            array_add(results, remainder);
            break;
        }
        remainder = right;
    }
    if (!remainder) array_add(results, string("")); // We want 'split' to return an unambiguous result (was there a match at the end or not?), and to be the inverse of 'join'. For this to happen, if there was a match at the very end, we just add an empty string. Maybe there is a more elegant way to structure the loop to make this happen.
    return results;
}
void replace_chars (string s, string chars, u8 replacement) {
    // In place: overwrite every byte of `s` that occurs in `chars` with
    // `replacement` (mutates the underlying buffer).
    for (s64 i = 0; i < s.count; i += 1) {
        if (is_any(s[i], chars)) {
            s[i] = replacement;
        }
    }
}
bool begins_with (string s, string prefix) {
    // True if `s` starts with `prefix` (byte-wise comparison).
    if (prefix.count > s.count) return false;
    return string_view(s, 0, prefix.count) == prefix;
}
// #path manipulation:
string path_filename (string path) {
    // Everything after the last slash/backslash (the whole path if none).
    s64 slash = find_index_of_any_from_right(path, "\\/");
    if (slash < 0) return path;
    return string_view(path, slash + 1, path.count - (slash + 1));
}
string path_strip_filename (string path) {
    // Everything up to and including the last slash/backslash ("" if none).
    s64 slash = find_index_of_any_from_right(path, "\\/");
    if (slash < 0) return "";
    return string_view(path, 0, slash + 1);
}
#if OS_WINDOWS
string strip_leading_backslash (string path) {
    // Drop a single leading '\' if present.
    if (path == "") return "";
    if (path[0] == '\\') {
        return string_view(path, 1, path.count - 1);
    }
    return path;
}
bool is_absolute_path (string path) {
    // Windows drive-absolute check: "<letter>:\" or "<letter>:/".
    // #FIX: require the first byte to actually be a letter — previously any
    // byte followed by ":\" (e.g. "1:\x") was reported absolute.
    // NOTE(review): UNC paths ("\\server\share") still report false — confirm
    // whether callers need them treated as absolute.
    if (path.count < 3) return false;
    if (!is_ascii_letter((char)path[0])) return false;
    return path[1] == ':' && (path[2] == '/' || path[2] == '\\');
}
#endif
bool path_extension (string path, string* ext) {
    // Extract the extension (text after the final '.') into *ext. Returns
    // false — leaving *ext == "" — when there is no extension: no dot, the
    // dot belongs to a directory component, dotfiles ("/.bashrc"), or "." /
    // ".." path components.
    (*ext) = "";
    s64 index = find_index_of_any_from_right(path, ".\\/");
    if (index <= 0) {
        return false; // no dot, or a dot at position 0 (dotfile)
    }
    // The right-most of {'.', '\\', '/'} must be the dot itself, otherwise
    // the last dot is inside a directory component.
    if (path[index] != '.') {
        return false;
    }
    // A dot immediately after a slash is a dotfile, not an extension.
    u8 previous = path[index-1];
    if ( (previous == '\\') || (previous == '/') ) {
        return false;
    }
    // Reject "." and ".." components ("..", "foo/..", etc.).
    if (previous == '.') {
        if (index == 1) return false;
        u8 two_previous = path[index-2];
        if (two_previous == '\\' || two_previous == '/') {
            return false;
        }
    }
    (*ext) = string_view(path, index + 1, path.count - index - 1);
    return true;
}

254
lib/Base/String.h Normal file
View File

@ -0,0 +1,254 @@
#pragma once
// #TODO: #strings:
// [ ] see: #Parsing stuff:
// [?] How do I accept variadic arguments of any type to my print function?
// [ ] Need to sort out how formatted strings and string builders are allocated
// [ ] Separate functions for temp alloc (tprint??)
// [ ] I should also put path manipulation here or in a separate file?
// Length + pointer byte view. Not necessarily null-terminated and not owning:
// the copy/format helpers in String.cpp DO null-terminate one byte past
// `count` for C interop, but arbitrary views carry no such guarantee.
struct string {
    s64 count; // number of bytes (not characters — contents may be UTF-8)
    u8* data;
    // Construct from a string literal or C-string
    string () { // default constructor
        count = 0;
        data = nullptr;
    }
    // NOTE(review): these implicit C-string constructors call strlen and do
    // not tolerate nullptr; being non-explicit they also allow silent
    // conversions like `string s = some_char_ptr;`.
    string (char* cstr) {
        count = strlen(cstr);
        data = (u8*)cstr;
    }
    string (u8* cstr) {
        count = strlen((char*)cstr);
        data = cstr;
    }
    // Non-owning (count, pointer) views — no copy, no terminator implied.
    string (s64 _count, char* str) { count = _count; data = (u8*)str; }
    string (s64 _count, u8* str) { count = _count; data = str; }
    // Byte-wise equality; counts must match exactly.
    bool operator==(const string& other) const {
        string first_string = *this;
        string second_string = other;
        // return strings_match(*this, other);
        if (first_string.count != second_string.count) {
            return false;
        }
        for (s64 i = 0; i < first_string.count; i += 1) {
            if (first_string.data[i] != second_string.data[i]) {
                return false;
            }
        }
        return true;
    }
    // !s is true for null or empty strings.
    bool operator ! () {
        Assert(count >= 0);
        return (data == nullptr || count == 0);
    }
    bool operator!=(const string& other) const {
        return !(*this == other);
    }
    // Unchecked in release; debug builds break on out-of-range indices.
    u8& operator[](s64 index) {
        #if ARRAY_ENABLE_BOUNDS_CHECKING
        if (index < 0 || index >= count) { debug_break(); } // index out of bounds
        #endif
        return data[index];
    }
};
// UTF-16 counterpart of `string` (u16 code units, used for Windows API interop).
struct wstring {
    // NOTE(review): the unit of `count` is inconsistent. The s32 constructor
    // stores BYTES including the terminator ((length + 1) * sizeof(u16)),
    // while the (count, data) constructor and operator[] treat count as an
    // ELEMENT count — so bounds checking on a ctor-allocated wstring permits
    // indices up to ~2x the real element count. Pick one unit and fix the
    // callers (wide_to_utf8(wstring) is affected).
    s64 count;
    u16* data;
    wstring () { // default constructor
        count = 0;
        data = nullptr;
    }
    // Allocate room for `length` wide chars plus a null terminator.
    wstring (s32 length) {
        data = NewArray<u16>(length + 1);
        s32 length_bytes = (length + 1) * sizeof(u16);
        count = length_bytes;
    }
    // Non-owning view.
    wstring (s64 _count, u16* _data) {
        count = _count;
        data = _data;
    }
    // !w is true for null or empty wstrings.
    bool operator ! () {
        Assert(count >= 0);
        return (data == nullptr || count == 0);
    }
    u16& operator[](s64 index) {
        #if ARRAY_ENABLE_BOUNDS_CHECKING
        if (index < 0 || index >= count) { debug_break(); } // index out of bounds
        #endif
        return data[index];
    }
};
// ~Keep these API
bool is_valid (string s);
bool is_c_string (string s);
u8* to_c_string (string s); // #allocates
force_inline string copy_string (Allocator allocator, string s);
string copy_string (string s); // #allocates, returned string is #null-terminated.
string copy_string_no_context (string s);
string copy_string_untracked (string s);
void string_free_no_context (string& s);
void string_free_untracked (string s);
string copy_string (char* c_string); // #allocates, returned string is #null-terminated.
string to_string (ArrayView<u8> str);
ArrayView<u8> to_view (string s);
void string_free(string& s);
// String manipulation & comparison
force_inline string string_view (string s, s64 start_index, s64 view_count);
bool strings_match (string first_string, string second_string);
// #Unicode
string wide_to_utf8 (u16* source, s32 length=-1);
wstring utf8_to_wide (string source);
// string format_string_temp (char* format, ...);
force_inline string format_string (Allocator allocator, char* format, ...);
string format_string (char* format, ...);
// string format_string_no_context (char* format, ...);
string to_lower_copy (string s_orig);
// Default character set for the trim functions below.
// NOTE(review): a non-inline global definition in a header is fine in a unity
// build, but mark it `inline` (C++17) if this header is ever included from
// multiple translation units.
string DEFAULT_SPACES = " \r\t\n";
string trim_right (string s, string chars=DEFAULT_SPACES, bool replace_with_zeros=true);
string trim_left (string s, string chars=DEFAULT_SPACES);
string trim (string s, string chars=DEFAULT_SPACES);
s64 find_index_of_any_from_right (string s, string bytes);
s64 find_index_from_left (string s, u8 c, s64 start_offset=0);
ArrayView<string> string_split (string s, u8 c);
// #path manipulation:
string path_filename (string path);
string path_strip_filename (string path);
// #TODO #Parsing stuff:
// is_white_space(char: u8)
// advance
// eat_spaces
// string to type or type to string conversions
// s64 string_to_int (string v, s32 base = 10, s64* remainder=nullptr);
// f64 string_to_f64
// f32 string_to_f32
// Need an API for inserting various types (ints, floats, etc.) into a String_Builder, and advancing
// the count.
internal force_inline u16 ascii_char_sort_key (u8 c) {
    // Maps one byte to a sortable key. The top nibble is a coarse group
    // (0 = punctuation/symbols and non-ASCII bytes, 1 = digits, 2 = letters);
    // the low bits order characters within their group.
    if (c & 0x80) {
        // Byte of a multi-byte UTF-8 sequence: grouped with symbols,
        // keeping its raw value so the order stays stable.
        return (0u << 12) | c;
    }
    if ('0' <= c && c <= '9') {
        return (1u << 12) | (u16)(c - '0');
    }
    bool is_lower = ('a' <= c && c <= 'z');
    bool is_upper = ('A' <= c && c <= 'Z');
    if (is_lower || is_upper) {
        // Interleave case pairs so 'a' < 'A' < 'b' < 'B' < ...
        u16 letter   = is_lower ? (u16)(c - 'a') : (u16)(c - 'A');
        u16 case_bit = is_lower ? 0u : 1u;
        return (2u << 12) | (u16)(letter << 1) | case_bit;
    }
    // Remaining ASCII (punctuation / control): preserve raw ASCII order.
    return (0u << 12) | c;
}
s32 string_lexicographical_compare(const void* p_a, const void* p_b) {
    // qsort-style comparator ordering two `string`s by per-byte sort keys.
    string* lhs = (string*)p_a;
    string* rhs = (string*)p_b;
    s64 shared = lhs->count;
    if (rhs->count < shared) shared = rhs->count;
    for (s64 byte_index = 0; byte_index < shared; byte_index += 1) {
        u16 lk = ascii_char_sort_key(lhs->data[byte_index]);
        u16 rk = ascii_char_sort_key(rhs->data[byte_index]);
        if (lk != rk) return (lk < rk) ? -1 : 1;
    }
    // Identical shared prefix: the shorter string sorts first.
    if (lhs->count != rhs->count) return (lhs->count < rhs->count) ? -1 : 1;
    return 0;
}
s32 string_lexicographical_compare_memcmp(const void* a, const void* b) {
    // Plain byte-wise comparator (memcmp order); not locale- or case-aware.
    string* lhs = (string*)a;
    string* rhs = (string*)b;
    s64 prefix = (lhs->count < rhs->count) ? lhs->count : rhs->count;
    s32 cmp = memcmp(lhs->data, rhs->data, prefix);
    if (cmp) return cmp;
    // Identical shared prefix: order by length, shorter first.
    if (lhs->count == rhs->count) return 0;
    return (lhs->count < rhs->count) ? -1 : 1;
}
s32 string_index_lexicographical_compare(const void* p_a, const void* p_b, void* ctx) {
    // sort_r comparator. Elements are u32 indices into the ArrayView<string>
    // passed through `ctx`; comparison is delegated to the key comparator.
    ArrayView<string>* strings = (ArrayView<string>*)ctx;
    u32 index_a = *(u32*)p_a;
    u32 index_b = *(u32*)p_b;
    return string_lexicographical_compare(&(*strings)[index_a], &(*strings)[index_b]);
}
// #TODO: This is really slow! Even in release mode!!
force_inline ArrayView<u32> string_sort_by_index (ArrayView<string> source) {
    // Returns a permutation of [0, source.count) such that visiting `source`
    // in that order yields the strings lexicographically sorted.
    ArrayView<u32> indices = ArrayView<u32>(source.count, false); // deliberately uninitialized; filled below
    for (s64 slot = 0; slot < indices.count; slot += 1) {
        indices[slot] = (u32)slot;
    }
    sort_r(indices.data, indices.count, sizeof(u32), string_index_lexicographical_compare, &source);
    return indices;
}
struct Tokenizer {
    // Cursor over an immutable byte range. `s` keeps the (possibly copied)
    // backing string alive; start/end delimit it, current is the read head.
    string s;
    u8* start;   // == s.data
    u8* end;     // one past the last byte: s.data + s.count
    u8* current; // next byte to be consumed
};
Tokenizer get_tokenizer_from_string (string s, bool make_copy=true) {
    // Builds a Tokenizer over `s`. When make_copy is true the tokenizer owns a
    // private copy of the bytes; otherwise it aliases the caller's memory.
    Tokenizer t;
    t.s = make_copy ? copy_string(s) : s;
    t.start = t.s.data;
    t.current = t.s.data;
    t.end = t.s.data + t.s.count;
    return t;
}

25
lib/Base/String_Builder.h Normal file
View File

@ -0,0 +1,25 @@
// #string_builder
// #limitations This won't be as fast as Jon's String_Builder in jai because we're backing it with an
// Arena, which calls VirtualAlloc, which is much slower than just using stack space to start.
// It also has a max capacity depending on what Arena_Reserve we choose.
// That being said, the implementation is much simpler, and we can keep it around for a bit.
// We can make it a lot faster by always having a string builder available in the thread_context,
// and just fetching that when we need it.
typedef ArenaArray<u8> String_Builder; // struct String_Builder
force_inline String_Builder* new_string_builder (Arena_Reserve new_reserve=Arena_Reserve::Size_64K, string label="");
force_inline void append (String_Builder* sb, string s);
void append (String_Builder* sb, ArrayView<string> strings); // appends each string in order
// This should probably be called append_but_do_not_increment_count
internal force_inline void append_no_add (String_Builder* sb, string s); // for appending null terminators, does not increment count.
void print_to_builder (String_Builder* sb, string format, ...);
void print_to_builder_internal (String_Builder* sb, string format, va_list args);
string string_view (String_Builder* sb); // view of the bytes written so far -- non-owning, TODO confirm
internal force_inline void reset_string_builder (String_Builder* sb, bool keep_memory=false);
force_inline string builder_to_string (String_Builder* sb); // returns copy and frees string_builder
internal force_inline void free_string_builder (String_Builder* sb);

View File

@ -0,0 +1,265 @@
// #move to substring matching algorithms
#include <memory.h> // memchr
#include <immintrin.h> // _mm256 AVX2
// #TODO: rename these routines, add routines for 3B and 4B string matching.
// For >= 5B, use BMH or something like it.
// Reference: https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html
bool memchr2c_avx2 (u8*p, u8 a, u8 b, u16 len) {
    // For 2-byte substring search ("ab" in "abacus")
    // Returns true if the consecutive pair [a, b] occurs anywhere in p[0..len).
    // Vector path: compare p[i..i+31] against `a` and the shifted window
    // p[i+1..i+32] against `b`; the same bit set in both masks means the pair
    // starts at that lane.
    if (len < 2) return false;
    __m256i va = _mm256_set1_epi8((char)a);
    __m256i vb = _mm256_set1_epi8((char)b);
    /**AVX-512BW __mmask64 k = _mm512_cmpeq_epi8_mask(v0, va) & _mm512_cmpeq_epi8_mask(v1, vb); if (k) return p + _tzcnt_u64(k); */
    u16 i = 0;
    // load 33 bytes total via two overlapping loads
    // (hence the i + 33 <= len bound: the second load reads p[i+1..i+32])
    for (; i + 33 <= len; i += 32) {
        __m256i v0 = _mm256_loadu_si256((__m256i*)(p + i));
        __m256i v1 = _mm256_loadu_si256((__m256i*)(p + i + 1));
        __m256i ca = _mm256_cmpeq_epi8(v0, va);
        __m256i cb = _mm256_cmpeq_epi8(v1, vb);
        u32 mask = _mm256_movemask_epi8(_mm256_and_si256(ca, cb));
        if (mask) return true; //p + i + _tzcnt_u32(mask);
    }
    // scalar tail
    for (; i + 1 < len; i++)
        if (p[i] == a && p[i+1] == b)
            return true;
    return false;
}
bool memchr2_avx2 (u8*p, u8 a, u8 b, u16 len) {
    // For two non-consecutive chars a and b anywhere in target string.
    // Returns true if EITHER byte occurs anywhere in p[0..len).
    __m256i va = _mm256_set1_epi8((char)a);
    __m256i vb = _mm256_set1_epi8((char)b);
    /** AVX-512BW: __mmask64 k = _mm512_cmpeq_epi8_mask(v, va) | _mm512_cmpeq_epi8_mask(v, vb); if (k) return p + _tzcnt_u64(k);*/
    u16 i = 0;
    for (; i + 32 <= len; i += 32) {
        __m256i v = _mm256_loadu_si256((__m256i*)(p + i));
        __m256i ca = _mm256_cmpeq_epi8(v, va);
        __m256i cb = _mm256_cmpeq_epi8(v, vb);
        __m256i m = _mm256_or_si256(ca, cb);
        u32 mask = _mm256_movemask_epi8(m);
        if (mask) { return true; }
    }
    // scalar tail for the last (len % 32) bytes
    for (; i < len; i += 1) {
        if (p[i] == a || p[i] == b) return true;
    }
    return false;
}
bool memchr_avx2 (u8* p, u8 c, u16 len) {
    // Returns true if byte `c` occurs anywhere in p[0..len); AVX2 analogue of memchr.
    // Broadcast 8-bit integer a to all elements of dst. (vpbroadcastb)
    __m256i vneedle = _mm256_set1_epi8((char)c);
    /** AVX-512BW: __m512i v = _mm512_loadu_si512(p); __mmask64 k = _mm512_cmpeq_epi8_mask(v, vneedle); if (k) return p + _tzcnt_u64(k); */
    u16 i = 0;
    for (; i + 32 <= len; i += 32) {
        // Load 256-bits of integer data from memory into dst. mem_addr does not need to be aligned on any particular boundary.
        __m256i v = _mm256_loadu_si256((__m256i*)(p + i));
        // Compare packed 8-bit integers in a and b for equality, and store the results in dst.
        __m256i cmp = _mm256_cmpeq_epi8(v, vneedle);
        // Create mask from the most significant bit of each 8-bit element in a, and store the result in dst.
        u32 mask = _mm256_movemask_epi8(cmp);
        if (mask) {
            return true;
        }
    }
    // scalar tail:
    for (; i < len; i += 1) {
        if (p[i] == c) return true;
    }
    return false;
}
bool equal_nocase (string a, string b) {
    // ASCII-case-insensitive equality; lengths must match exactly.
    if (a.count != b.count) return false;
    for (s64 byte_index = 0; byte_index < a.count; byte_index += 1) {
        u8 ca = to_lower_ascii(a[byte_index]);
        u8 cb = to_lower_ascii(b[byte_index]);
        if (ca != cb) return false;
    }
    return true;
}
s64 find_index_from_left_no_case (string s, string substring, s64 start_index = 0) {
    // Case-insensitive (ASCII) forward substring search. Returns the byte index
    // of the first match at or after start_index, or -1 if there is none.
    if (!substring) return -1;
    s64 last_candidate = s.count - substring.count; // last index a match could start at
    for (s64 i = start_index; i <= last_candidate; i += 1) {
        if (equal_nocase(string_view(s, i, substring.count), substring)) {
            return i;
        }
    }
    return -1;
}
// Fuzzy_Filter implementation copied from focus-editor
// source: https://github.com/focus-editor/focus
struct Fuzzy_Filter {
    string full_string;          // the whole (trimmed) filter text
    struct Chunk {
        string str;              // text this chunk matches against
        ArrayView<string> chars; // per-UTF8-character views into the chunk
    };
    ArrayView<Chunk> chunks;
};
void fuzzy_filter_free (Fuzzy_Filter* ff) {
    // Releases the filter's memory and zeroes the struct.
    // NOTE(review): construct_fuzzy_filter allocates everything from temp();
    // freeing temp-backed memory here may be a no-op or invalid depending on
    // the temp allocator's free semantics -- confirm. The per-chunk `chars`
    // views are not freed individually.
    string_free(ff->full_string);
    array_free(ff->chunks);
    zero_struct(ff);
}
Fuzzy_Filter construct_fuzzy_filter (string filter_string, bool multi_chunk_search=false) {
    // Builds a Fuzzy_Filter from a user-typed filter string. When
    // multi_chunk_search is set, '\\' and '/' are folded into spaces and the
    // string is split on spaces so each chunk can match independently.
    // All memory comes from temp(); the result is only valid until temp reset.
    // Returns an empty (zeroed) filter when the trimmed input is empty.
    push_allocator(temp());
    string s = copy_string(trim(filter_string));
    if (!s) return {};
    Array<Fuzzy_Filter::Chunk> chunks;
    chunks.allocator = temp();
    ArrayView<string> strings;
    if (multi_chunk_search) {
        u8 space = ' ';
        replace_chars(s, "\\/", space); // < THIS DOESN'T WORK FOR FILESYSTEM SEARCHES!
        strings = string_split(s, space);
    } else {
        strings = ArrayView<string>(1);
        strings[0] = s;
    }
    for_each(i, strings) {
        string current = strings[i];
        if (!current) continue;
        Array<string> chars;
        chars.allocator = temp();
        // #Fix: reserve one slot per byte of THIS chunk (a chunk can never
        // hold more characters than bytes); previously reserved strings.count,
        // the number of chunks, which under-reserves.
        array_reserve(chars, current.count);
        u8* t = current.data;
        while (t < current.data + current.count) {
            // Each entry is a view over one UTF-8 encoded character.
            string substring;
            substring.data = t;
            t = unicode_next_character(t);
            substring.count = t - substring.data;
            array_add(chars, substring);
        }
        // #Fix: store this chunk's own substring (`current`), not the whole
        // filter string `s` -- otherwise per-chunk exact matching in
        // fuzzy_match always retries the full string and can never succeed
        // after the full-string match already failed.
        array_add(chunks, {current, chars});
    }
    Fuzzy_Filter ff = {};
    ff.full_string = s;
    ff.chunks = chunks;
    return ff;
}
ArrayView<bool> fuzzy_match (string str, Fuzzy_Filter filter, bool exact_match_only,
    s32* score, bool* exact_match) {
    // Scores `str` against `filter`. Returns a per-byte highlight mask
    // (true = that byte participated in a match) and writes the score and
    // exact-match flag through the out-pointers (both must be non-null).
    // Matching strategy, best ranked first:
    //   1. the whole filter string as one substring of str,
    //   2. each chunk as an exact substring (accept if at least one hits),
    //   3. each chunk character-by-character, left to right.
    ArrayView<bool> highlights = ArrayView<bool>(str.count);
    if (!is_valid(filter.chunks)) {
        // Empty filter: no match, no highlights.
        (*score) = 0;
        (*exact_match) = false;
        return highlights;
    }
    // #TODO: Document the scoring system more clearly.
    constexpr s32 MAX_CHARS = 256;
    { // Try matching the full string first and rank it highest
        s64 index = find_index_from_left_no_case(str, filter.full_string, 0);
        if (index >= 0) {
            memset(highlights.data + index, 1, filter.full_string.count);
            // Longer matches and earlier positions score higher.
            (*score) = (MAX_CHARS + filter.full_string.count) * (MAX_CHARS - index);
            (*exact_match) = true;
            return highlights;
        }
    }
    auto_release_temp();
    push_allocator(temp());
    s32 final_score = 0;
    // Scratch mask reused per chunk, merged into `highlights` on success.
    ArrayView<bool> chunk_highlights = ArrayView<bool>(str.count, false);
    // Try matching each chunk exactly and accept the match if at least one matches
    for_each(c, filter.chunks) {
        Fuzzy_Filter::Chunk chunk = filter.chunks[c];
        s32 chunk_score = 0;
        memset(chunk_highlights.data, 0, chunk_highlights.count);
        s64 index = find_index_from_left_no_case(str, chunk.str, 0);
        if (index >= 0) {
            memset(chunk_highlights.data + index, 1, chunk.str.count);
            // Penalize long targets slightly (- str.count) so short strings rank higher.
            chunk_score += (MAX_CHARS/2 + chunk.str.count) * (MAX_CHARS - index) - str.count;
        }
        if (chunk_score >= 0) {
            final_score += chunk_score;
            for_each(ch, chunk_highlights) {
                highlights[ch] |= chunk_highlights[ch];
            }
        }
    }
    if (final_score > 0) {
        (*score) = final_score;
        (*exact_match) = true; // if we had at least one exact match don't proceed
        return highlights;
    }
    if (final_score <= 0 && exact_match_only) {
        // Caller only wants exact matches; report the failure.
        (*score) = final_score;
        (*exact_match) = false;
        return highlights;
    }
    // Now match individual characters:
    for_each(c, filter.chunks) {
        Fuzzy_Filter::Chunk chunk = filter.chunks[c];
        s32 pos = 0; // search resumes after each matched character
        s32 chunk_score = 0;
        memset(chunk_highlights.data, 0, chunk_highlights.count);
        // Try matching the full chunk first:
        for_each(i, chunk.chars) {
            string character = chunk.chars[i];
            s64 index = find_index_from_left_no_case(str, character, pos);
            if (index < 0) {
                // One character missing: the whole chunk fails.
                chunk_score = -1;
                break;
            }
            chunk_highlights[index] = true;
            chunk_score += 10 * (MAX_CHARS - index); // The closer to the beginning the better
            pos = index + character.count;
        }
        if (chunk_score >= 0) {
            final_score += chunk_score;
            // apply chunk highlights
            for_each(ch, chunk_highlights) {
                highlights[ch] |= chunk_highlights[ch];
            }
        }
    }
    (*score) = final_score;
    (*exact_match) = false;
    return highlights;
}

319
lib/Base/Thread_Group.cpp Normal file
View File

@ -0,0 +1,319 @@
// Thread_Group Internal Procedures
// #NOTE: There is no logging in this implementation!
void init(Work_List* list) {
    // Prepare a Work_List: the semaphore counts queued entries (one signal per
    // add_work) and the mutex guards the intrusive singly-linked list.
    Assert(list != nullptr);
    semaphore_init(&list->semaphore);
    mutex_init(&list->mutex);
}
void destroy(Work_List* list) {
    // Tear down init()'s primitives. Entries still queued are NOT freed here.
    Assert(list != nullptr);
    semaphore_destroy(&list->semaphore);
    mutex_destroy(&list->mutex);
}
void add_work(Work_List* list, Work_Entry* entry) {
    // Append `entry` at the tail (FIFO) and wake one waiter. The signal is
    // issued outside the lock to keep the critical section short.
    lock(&list->mutex);
    if (list->last) { // This list has nodes in it. Put this entry onto the end so we're FIFO
        list->last->next = entry;
        list->last = entry;
    } else { // The list is empty, the list will have 1 entry:
        list->first = entry;
        list->last = entry;
    }
    list->count += 1;
    unlock(&list->mutex);
    signal(&list->semaphore);
}
Work_Entry* get_work(Work_List* list) {
    // Pop the head entry, or return nullptr when the list is empty (e.g. the
    // work was stolen by another thread after our semaphore wake-up).
    // NOTE(review): `count` is not decremented here; only the completed lists'
    // counts are consumed (reset wholesale in get_completed_work) -- confirm
    // that nothing reads an 'available' list's count.
    lock(&list->mutex);
    // Grab first node; may be null if work has been stolen
    Work_Entry* result = list->first;
    if (result) {
        // Update the head of the list to be the next item
        list->first = result->next;
        // If the new 'first' pointer is null, the list has become empty, so set 'last' to null also
        if (list->first == nullptr) {
            list->last = nullptr;
        }
    }
    unlock(&list->mutex);
    return result;
}
s64 thread_group_run (Thread* thread) {
    // This is the main loop that a particular thread runs.
    // It waits on its semaphore, looking for new work on its 'available' list.
    // When it finds work, it calls the thread group procedure with the work,
    // then puts the work into its 'completed' list.
    Worker_Info* info = thread->worker_info;
    Thread_Group* group = info->group;
    Work_Entry* entry = nullptr;
    while(!group->should_exit) {
        if (!entry) {
            wait_for(&info->available.semaphore);
            if (group->should_exit) break;
            // Remove work from the list. Might be none if another thread stole.
            entry = get_work(&info->available);
        }
        if (entry) {
            entry->thread_index = thread->index;
            entry->next = nullptr;
            Thread_Continue_Status should_continue = Thread_Continue_Status::CONTINUE;
            if (group->proc) {
                should_continue = group->proc(group, thread, entry->work);
            }
            // The work is done, add it to the completed list:
            add_work(&info->completed, entry);
            if (should_continue == Thread_Continue_Status::STOP) {
                break;
            }
        }
        if (info->work_steal_indices.count) {
            if (group->should_exit) break;
            // Check for more work. If there's none, try to steal some before going to sleep.
            entry = get_work(&info->available);
            if (entry) {
                // Decrement the semaphore for the work we dequeued.
                wait_for(&info->available.semaphore);
            } else { // no work left, let's steal some
                for (s64 i = 0; i < info->work_steal_indices.count; i += 1) {
                    // #Fix: steal from the worker named by work_steal_indices[i].
                    // Previously this indexed worker_info[i] directly, which
                    // ignored the steal-order array entirely and could "steal"
                    // from our own (already-empty) list.
                    s32 victim = info->work_steal_indices[i];
                    entry = get_work(&group->worker_info[victim].available);
                    if (entry) {
                        break;
                    }
                }
            }
        } else {
            entry = nullptr;
        }
    }
    return 0;
}
// Thread_Group API: I think this should explicitly take an allocator, or just always use default_allocator()
void thread_group_init (Thread_Group* group, s32 group_thread_count, Thread_Group_Proc group_proc,
    string label, bool enable_work_stealing = false) {
    // Allocates and wires up group_thread_count workers. Threads are created
    // but not started; call thread_group_start afterwards.
    // Set allocator if not already set:
    if (!group->allocator.proc) {
        group->allocator = default_allocator();
    }
    // should we copy label
    // NOTE(review): label.data is used with %s, which assumes the string is
    // null-terminated -- confirm callers always pass literals/copies.
    string label_internal = format_string(temp(), "%s Thread Group Init", label.data);
    push_allocator_label(label_internal);
    push_allocator(group->allocator);
    group->name = copy_string(label);
    group->worker_info = ArrayView<Worker_Info>(group_thread_count);
    group->proc = group_proc;
    for (s64 i = 0; i < group->worker_info.count; i += 1) {
        Worker_Info* info = &group->worker_info[i];
        string thread_name = format_string(temp(), "Thread Group %s, thread %lld", label.data, i);
        thread_init(&info->thread, thread_group_run, thread_name);
        info->thread.worker_info = info;
        init(&info->available);
        init(&info->completed);
        info->group = group;
        info->worker_index = (s32)i;
        if (enable_work_stealing) {
            // Make an array that contains all worker indices except for ours.
            // This gets shuffled for work stealing. Why? Because we want to
            // search through 2 threads for work, but if they are mostly empty,
            // we might have to scan through the whole array anyway. Maybe this
            // is not the best way to do it.
            // NOTE(review): no shuffle is actually performed here yet -- the
            // indices stay in ascending order. TODO confirm whether intended.
            info->work_steal_indices = ArrayView<s32>(group_thread_count-1, false);
            s32 cursor = 0;
            for (s32 j = 0; j < group_thread_count; j += 1) {
                if (j == i) continue; // skip our own index
                info->work_steal_indices[cursor] = j;
                cursor += 1;
            }
        }
    }
    group->initialized = true;
}
void thread_group_start (Thread_Group* group) {
    // Launches every worker thread in the group. Call after thread_group_init.
    Assert(group->worker_info.count > 0);
    for_each(i, group->worker_info) {
        thread_start(&group->worker_info[i].thread);
    }
    group->started = true;
}
bool thread_group_shutdown (Thread_Group* group, s32 timeout_milliseconds = -1) {
    // Signals every worker to exit and waits up to timeout_milliseconds for
    // them to finish (negative timeout: wait without a deadline). Returns
    // false if any thread is still running when the timeout expires -- in
    // that case nothing is freed and the call can be retried. On success all
    // worker resources and the worker array are freed.
    Assert(group->initialized);
    group->should_exit = true;
    bool all_threads_done = true;
    if (group->started) {
        // Wake each worker blocked on its 'available' semaphore so it can
        // observe should_exit.
        for (s64 i = 0; i < group->worker_info.count; i += 1) {
            Worker_Info* info = &group->worker_info[i];
            signal(&info->available.semaphore);
        }
        f64 start = 0;
        if (timeout_milliseconds > 0) {
            start = GetUnixTimestamp();
        }
        s64 remaining_timeout_ms = (s64)timeout_milliseconds;
        for (s64 i = 0; i < group->worker_info.count; i += 1) {
            Worker_Info* info = &group->worker_info[i];
            if (remaining_timeout_ms > 0) {
                // Shrink the remaining budget by the time already spent waiting.
                s64 time_elapsed_ms = (s64)((GetUnixTimestamp() - start) * 1000.0);
                remaining_timeout_ms = (timeout_milliseconds - time_elapsed_ms);
                if (remaining_timeout_ms < 0)
                    remaining_timeout_ms = 0;
            }
            bool is_done = thread_is_done(&info->thread, (s32)remaining_timeout_ms);
            if (!is_done)
                all_threads_done = false;
        }
    }
    if (!all_threads_done) return false;
    // All threads have exited: release per-worker resources.
    for (s64 i = 0; i < group->worker_info.count; i += 1) {
        Worker_Info* info = &group->worker_info[i];
        thread_deinit(&info->thread, false);
        destroy(&info->available);
        destroy(&info->completed);
        array_free(info->work_steal_indices);
    }
    array_free(group->worker_info);
    return true;
}
// Should have a shutdown_and_reset option too (see how I did it in prototyping-main)
void add_work (Thread_Group* group, void* work) {
    // Queues one unit of work, distributing entries round-robin across workers.
    Assert(group->worker_info.count > 0);
    push_allocator(group->allocator);
    // Wrap the payload in a linked-list node so it can be queued and dequeued.
    Work_Entry* entry = New<Work_Entry>();
    entry->issue_time = GetUnixTimestamp();
    entry->work = work;
    // Round-robin worker selection.
    s32 worker = group->next_worker_index;
    group->next_worker_index = (s32)((worker + 1) % group->worker_info.count);
    entry->work_list_index = worker;
    // Hand the node to that worker's queue of available work.
    add_work(&group->worker_info[worker].available, entry);
}
ArrayView<void*> get_completed_work (Thread_Group* group) {
    // Drains every worker's 'completed' list and returns the finished work
    // payloads. The returned view is temp()-allocated; the Work_Entry nodes
    // themselves are freed here with the group allocator.
    Array<void*> results = Array<void*>();
    results.allocator = temp();
    push_allocator(group->allocator);
    // We iterate over every worker thread to see if anything has completed.
    // Note that if this Thread_Group is idle most of the time, and you call
    // get_completed_work once per frame, most of the time this loop will
    // just be waste. Hopefully it's not very much waste compared to everything else
    // your program is doing, but in the future we may add an early-out:
    // if all work was completed before, and we never added new work, obviously
    // we don't need to do anything, as there can't be any new work.
    for (s64 i = 0; i < group->worker_info.count; i += 1) {
        Worker_Info* info = &group->worker_info[i];
        Work_List* list = &info->completed;
        Work_Entry* completed = nullptr;
        s32 new_count = 0;
        // Detach the whole list under the lock, then walk it unlocked.
        { lock(&list->mutex);
            new_count = list->count;
            completed = list->first;
            if (list->first) {
                list->first = nullptr;
                list->last = nullptr;
                list->count = 0;
            }
            unlock(&list->mutex);
        }
        if (!completed) continue;
        // #TODO: #Thread_Group #array_reserve - try to do this in two passes:
        // Note that we are maybe adding small numbers of results over a larger
        // number of cores. Really, if we want to be efficient here, we can build
        // a larger linked list out of the mini-lists we gather, and accumulate
        // the counts, then do the reserve all in one batch when we are done
        // looking at the threads. For simplicity this has not yet been done,
        // but it may not be much more complicated, actually.
        array_reserve(results, results.count + new_count);
        s64 old_count = results.count;
        while (completed) {
            array_add(results, completed->work);
            Work_Entry* next = completed->next;
            internal_free(completed); // node was allocated in add_work with group->allocator
            completed = next;
        }
        Assert(results.count == old_count + new_count);
    }
    return ArrayView<void*>(results);
}

101
lib/Base/Threads.cpp Normal file
View File

@ -0,0 +1,101 @@
// really #hacky forward declares.
struct Work_Entry;
struct Worker_Info;
struct Work_List;
struct Thread_Group;
void init(Work_List* list);
void destroy(Work_List* list);
s64 thread_group_run (Thread* thread);
struct Thread {
    Thread_Context* context; // per-thread context (temp arena, builders, ...)
    Thread_Proc proc;        // entry point run on the OS thread
    void* data;              // user payload -- presumably handed to proc, TODO confirm
    s64 index;               // id -- presumably assigned from next_thread_index, TODO confirm
    OS_Thread os_thread;
    // Used by Thread_Group
    Worker_Info* worker_info;
};
global u32 next_thread_index = 1; // starts at 1; 0 presumably reserved -- TODO confirm
// Thread Group API (Copied from Jonathan Blow's implementation - I did not come up with this.)
struct Work_Entry {
    // Intrusive FIFO node wrapping one unit of user work.
    Work_Entry* next;
    void* work;       // user payload passed to the group proc
    s64 thread_index; // Thread.index for the thread that handled this work
    // string logging_name;
    f64 issue_time;      // GetUnixTimestamp() at enqueue time
    s32 work_list_index; // worker the entry was first queued on
};
struct Work_List {
    // Mutex-guarded FIFO; the semaphore is signaled once per queued entry.
    Semaphore semaphore;
    Mutex mutex;
    Work_Entry* first;
    Work_Entry* last;
    s32 count; // NOTE(review): incremented in add_work but never decremented in get_work
};
struct Worker_Info {
    Thread thread;
    Work_List available; // work queued for this thread (other workers may steal from it)
    Work_List completed; // finished work, drained by get_completed_work
    Thread_Group* group;
    s32 worker_index;
    u8 padding0[44]; // manual padding; size depends on the layouts above (guarded by the static_assert below)
    // Work steal indices should be on another cache line:
    ArrayView<s32> work_steal_indices;
    u8 padding1[48];
};
static_assert(sizeof(Worker_Info) % 64 == 0); // This MUST be padded to cache line!
enum class Thread_Continue_Status: s32 {
    STOP = 0,    // worker exits its run loop after the current item
    CONTINUE = 1
};
typedef Thread_Continue_Status (*Thread_Group_Proc)(Thread_Group* group, Thread* thread, void* work);
struct Thread_Group {
    void* data;
    Thread_Group_Proc proc; // called once per work item on a worker thread
    string name;
    Allocator allocator; // for allocating work indices
    ArrayView<Worker_Info> worker_info; // only alloc'd once with allocator??
    s32 next_worker_index; // round-robin cursor used by add_work
    bool initialized = false;
    bool started = false;
    bool should_exit = false;
};
// This might be too slow.
s32 get_thread_index (Thread_Group* group, s32 thread_index) {
    // Linear scan mapping a global Thread.index to its slot in worker_info.
    // Returns -1 when no worker in this group owns that thread index.
    s64 worker_count = group->worker_info.count;
    for (s64 slot = 0; slot < worker_count; slot += 1) {
        if (group->worker_info[slot].thread.index == thread_index) {
            return (s32)slot; // zero-indexed within this thread group
        }
    }
    return -1;
}
void context_arenas (Thread_Context* context, Array<Arena*>* arenas_out, bool recursive=false) {
    // Collects every arena reachable from `context` into arenas_out; with
    // recursive=true, the contexts of child threads are included as well.
    // NOTE(review): log_builder and string_builder are dereferenced without a
    // null check -- confirm they are always allocated before this is called.
    if (context->temp) array_add(*arenas_out, (Arena*)context->temp);
    if (context->arena) array_add(*arenas_out, (Arena*)context->arena);
    if (context->log_builder->arena)
        array_add(*arenas_out, context->log_builder->arena);
    if (context->string_builder->arena)
        array_add(*arenas_out, context->string_builder->arena);
    if (context->error_arena)
        array_add(*arenas_out, context->error_arena);
    if (recursive) {
        for_each(c, context->child_threads) {
            auto next_context = context->child_threads[c]->context;
            if (next_context == nullptr) continue; // child not (yet) initialized
            context_arenas(next_context, arenas_out, true);
        }
    }
}

203
lib/Base/Timing.h Normal file
View File

@ -0,0 +1,203 @@
// #Timing API:
// Scope guards that measure elapsed time (rdtsc cycles or wall clock) and
// print/log it when the scope exits. Concat + __LINE__ gives each guard a
// unique variable name so several blocks can coexist in one scope.
#define System_Timed_Block_Print(name) \
    system_timed_block_print Concat(_sys_timed_block_print_guard, __LINE__)(name)
#define Timed_Block_Print(name) \
    timed_block_print Concat(_timed_block_print_guard, __LINE__)(name)
// This is for timing stuff that happens where context is not available.
// memory will leak from this operation. #MemoryLeak #NoContext
#define Timed_Block_Print_No_Context(name) \
    timed_block_print_no_context Concat(_timed_block_print_guard, __LINE__)(name)
force_inline u64 rdtsc();
#if OS_WINDOWS
#include <intrin.h>
#pragma intrinsic(__rdtsc)
// Raw time-stamp counter; converted to seconds via g_cpu_base_frequency_megahertz.
force_inline u64 rdtsc() {
    return __rdtsc();
}
#endif
// TSC base frequency used to convert rdtsc deltas to seconds. 0 until set.
global u32 g_cpu_base_frequency_megahertz;
void set_cpu_base_frequency (u32 frequency_megahertz) {
    // Records the detected CPU base frequency; 0 means detection failed, in
    // which case we fall back to 3000 MHz so timing code keeps working
    // (absolute numbers will then be inaccurate).
    g_cpu_base_frequency_megahertz = frequency_megahertz;
    if (!g_cpu_base_frequency_megahertz) {
        printf("[Warning] Unable to set CPU base frequency by reading CPUID. Falling back to default value of 3000 MHz\n");
        g_cpu_base_frequency_megahertz = 3000; // default value
    } else {
        printf("CPU Base Frequency detected as %d MHz\n", g_cpu_base_frequency_megahertz);
    }
}
string format_time_seconds_no_context (f64 time) { // #MemoryLeak #NoContext
    // Formats a duration with the most natural unit (ns/us/ms/s).
    // No-context variant: the allocation is never freed.
    char* fmt;
    f64 scaled;
    if (time < 1.0e-6)      { fmt = (char*)"%1.2f ns"; scaled = time * 1.0e9; }
    else if (time < 1.0e-3) { fmt = (char*)"%1.3f us"; scaled = time * 1.0e6; }
    else if (time < 1)      { fmt = (char*)"%1.3f ms"; scaled = time * 1.0e3; }
    else                    { fmt = (char*)"%1.3f s";  scaled = time; }
    return format_string(fmt, scaled);
}
string format_cycles_no_context (u64 ticks) { // #MemoryLeak #NoContext
    // Scales a raw tick count by factors of 1000 into cycles/K/M/B/T cycles.
    string units[5] = { "cycles", "K cycles", "M cycles", "B cycles", "T cycles" };
    f64 scaled = (f64)ticks;
    s64 unit_index = 0;
    for (; scaled >= 1000 && unit_index < 4; unit_index += 1) { // 4 is from (units.count-1)
        scaled /= 1000.0;
    }
    return format_string("%1.2f %s", scaled, units[unit_index].data);
}
string format_time_seconds (f64 time) {
    // Same unit selection as format_time_seconds_no_context, but allocates
    // through the current context allocator.
    char* fmt;
    f64 scaled;
    if (time < 1.0e-6)      { fmt = (char*)"%1.2f ns"; scaled = time * 1.0e9; }
    else if (time < 1.0e-3) { fmt = (char*)"%1.3f us"; scaled = time * 1.0e6; }
    else if (time < 1)      { fmt = (char*)"%1.3f ms"; scaled = time * 1.0e3; }
    else                    { fmt = (char*)"%1.3f s";  scaled = time; }
    return format_string(fmt, scaled);
}
string format_int_with_commas (s64 value) {
    // Formats `value` with ',' as a thousands separator, e.g. 1234567 -> "1,234,567".
    // Allocates the result (+1 byte for a null terminator from NewArray).
    char tmp[32];
    snprintf(tmp, sizeof(tmp), "%lld", value);
    s32 tmp_len = (s32)strlen(tmp);
    // #Fix: count separators from the DIGIT count, not the full length --
    // previously a leading '-' was counted as a digit, so negative values
    // whose digit count is a multiple of 3 produced "-,123,456".
    s32 sign_chars = (tmp[0] == '-') ? 1 : 0;
    s32 digits = tmp_len - sign_chars;
    s32 commas = (digits - 1) / 3;
    s32 new_len = tmp_len + commas;
    string new_string;
    new_string.data = NewArray<u8>(new_len + 1);
    new_string.count = new_len;
    // Copy right-to-left, inserting a comma after every 3 digits.
    s32 i = tmp_len - 1;
    s32 j = new_len - 1;
    s32 group = 0;
    while (i >= 0) {
        new_string[j] = tmp[i];
        j -= 1;
        i -= 1;
        group += 1;
        // #Fix: never place a comma to the left of all digits (i.e. right
        // after the '-' sign or at the very front).
        if (group == 3 && i >= sign_chars) {
            new_string[j] = ',';
            j -= 1;
            group = 0;
        }
    }
    return new_string;
}
string format_int_with_leading_spaces (s32 value, s32 digits_to_show) {
    // Right-aligns `value` in a field of digits_to_show characters (snprintf
    // "%*d"). Allocates a fixed 33-byte buffer; count is the printed length.
    Assert(digits_to_show < 12); // no way you have a trillion files, bro.
    string new_string;
    new_string.data = NewArray<u8>(32 + 1);
    new_string.count = snprintf((char*)new_string.data, (size_t)33, (char*)"%*d", digits_to_show, value);
    return new_string;
}
string format_cycles (u64 ticks) {
    // Scales a raw tick count by factors of 1000 into cycles/K/M/B/T cycles
    // and formats it through the current context allocator.
    string units[5] = { "cycles", "K cycles", "M cycles", "B cycles", "T cycles" };
    f64 scaled = (f64)ticks;
    s64 unit_index = 0;
    for (; scaled >= 1000 && unit_index < 4; unit_index += 1) { // 4 is from (units.count-1)
        scaled /= 1000.0;
    }
    return format_string("%1.2f %s", scaled, units[unit_index].data);
}
string format_bytes (s64 bytes, s32 trailing_width = 3) {
    // Human-readable size: scales by 1024 through B..PB and prints with
    // trailing_width (0..3) digits after the decimal point. Asserts on any
    // other width.
    if (bytes == 0) return copy_string(" - B");
    string units[6] = { "B", "KB", "MB", "GB", "TB", "PB" };
    f64 count_f64 = (f64)bytes;
    s32 unit_index = 0;
    while (count_f64 >= 1024 && unit_index < (5)) {
        count_f64 /= 1024.0;
        unit_index += 1;
    }
    // This makes the trailing width param kinda pointless... idk.
    if (unit_index == 0) trailing_width = 0;
    // if (unit_index == 1) trailing_width = 2;
    switch (trailing_width) {
        case 0: return format_string("%.0f %s", count_f64, units[unit_index].data);
        case 1: return format_string("%.1f %s", count_f64, units[unit_index].data);
        case 2: return format_string("%.2f %s", count_f64, units[unit_index].data);
        case 3: return format_string("%.3f %s", count_f64, units[unit_index].data);
        default: break;
    }
    // Only reached with an out-of-range width.
    Assert(trailing_width >= 0 && trailing_width <= 3);
    return "";
}
struct timed_block_print {
    // RAII guard: records rdtsc() at construction and logs the elapsed time
    // (seconds + cycles) when the scope exits. Requires an initialized
    // thread context and CPU base frequency.
    string block_name;
    u64 start_tick;
    timed_block_print(string _block_name) {
        Assert(g_cpu_base_frequency_megahertz != 0);
        Assert(thread_local_context != nullptr); // we need temp allocator initialized!
        block_name = _block_name;
        start_tick = rdtsc();
    }
    ~timed_block_print() {
        u64 end_tick = rdtsc();
        u64 tick_difference = end_tick - start_tick;
        f64 ticks_f64 = (f64)tick_difference;
        // ticks / (MHz * 1e6) = seconds
        f64 elapsed_time_seconds = ticks_f64 / (f64)((s64)g_cpu_base_frequency_megahertz * 1000000);
        push_allocator(temp());
        log("[Timed_Block %s]: %s (%s)", block_name.data, format_time_seconds(elapsed_time_seconds).data, format_cycles(tick_difference).data);
    }
};
struct timed_block_print_no_context {
    // Like timed_block_print but usable before a thread context exists:
    // prints via printf and leaks the formatted strings. #MemoryLeak
    string block_name;
    u64 start_tick;
    timed_block_print_no_context(string _block_name) {
        Assert(g_cpu_base_frequency_megahertz != 0);
        block_name = _block_name;
        start_tick = rdtsc();
    }
    ~timed_block_print_no_context() {
        u64 end_tick = rdtsc();
        u64 tick_difference = end_tick - start_tick;
        f64 ticks_f64 = (f64)tick_difference;
        f64 elapsed_time_seconds = ticks_f64 / (f64)((s64)g_cpu_base_frequency_megahertz * 1000000);
        printf("[Timed_Block(No Context) %s]: %s (%s)\n",
            block_name.data,
            format_time_seconds_no_context(elapsed_time_seconds).data,
            format_cycles_no_context(tick_difference).data);
    }
};
struct system_timed_block_print {
    // Wall-clock variant (GetUnixTimestamp); use when cycle-level resolution
    // is unnecessary or the TSC frequency has not been set.
    string block_name;
    f64 start_time;
    system_timed_block_print(string _block_name) {
        block_name = _block_name;
        start_time = GetUnixTimestamp();
    }
    ~system_timed_block_print() {
        f64 end_time = GetUnixTimestamp();
        f64 elapsed_time_seconds = end_time - start_time;
        push_allocator(temp());
        log("[Timed_Block %s]: %s", block_name.data, format_time_seconds(elapsed_time_seconds).data);
    }
};

57
lib/Base/Unicode.cpp Normal file
View File

@ -0,0 +1,57 @@
// Number of continuation bytes that follow a given UTF-8 lead byte, indexed
// by the byte's value. 0x00-0x7F (ASCII) and 0x80-0xBF (continuation bytes
// themselves) map to 0; 0xF8+ follow the legacy 5/6-byte encodings.
constexpr u8 trailing_bytes_for_utf8[] = {
    0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
    0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
    0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
    0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
    0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
    0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
    1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,
    2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2, 3,3,3,3,3,3,3,3,4,4,4,4,5,5,5,5};
constexpr u32 UNI_REPLACEMENT_CHAR = 0x0000FFFD; // U+FFFD, substituted for malformed input
// Mask selecting the payload bits of the lead byte, indexed by continuation count.
// (name is misspelled "inital"; renaming would touch all call sites)
constexpr u8 utf8_inital_byte_mask[6] = { 0x7F, 0x1F, 0x0F, 0x07, 0x03, 0x01 };
constexpr u32 UNI_MAX_UTF32 = 0x7FFFFFFF; // legacy 31-bit UTF-8 maximum code value
bool character_utf8_to_utf32 (u8* data, s64 source_length, u32* utf32, s64* source_length_out) {
    // Decodes one UTF-8 character from `data` (source_length bytes available).
    // On success writes the code point to *utf32 and the bytes consumed to
    // *source_length_out, returning true. If the sequence is truncated it
    // writes U+FFFD, consumes everything remaining, and returns false.
    // Continuation bytes are not validated yet (no strict mode).
    u8 first_character = data[0];
    s32 continuation_bytes = trailing_bytes_for_utf8[first_character];
    if ((continuation_bytes + 1) > source_length) {
        // Truncated sequence: substitute the replacement character.
        (*utf32) = UNI_REPLACEMENT_CHAR;
        (*source_length_out) = source_length;
        return false;
    }
    u32 ch = data[0] & utf8_inital_byte_mask[continuation_bytes];
    for (s64 i = 1; i < continuation_bytes + 1; i += 1) {
        ch = ch << 6; // 6 payload bits per continuation byte
        //if strict ...
        ch |= data[i] & 0x3F;
    }
    // #if strict... {}
    (*utf32) = ch;
    (*source_length_out) = continuation_bytes + 1;
    if (ch > UNI_MAX_UTF32) {
        (*utf32) = UNI_REPLACEMENT_CHAR; // value out of range
    }
    return true;
}
bool next_utf8_to_utf32 (string& s, u32* utf32_char_out) {
    // Decodes the next character of `s` and advances s past it (s is mutated
    // in place). Returns false when the sequence was truncated.
    s64 codepoint_source_length;
    bool success = character_utf8_to_utf32(s.data, s.count, utf32_char_out, &codepoint_source_length);
    s.data += codepoint_source_length;
    s.count -= codepoint_source_length;
    Assert(s.count >= 0);
    return success;
}
u8* unicode_next_character (u8* s) {
    // Advances past one UTF-8 encoded character: the lead byte plus its
    // continuation bytes. Assumes `s` points at a lead byte; no validation.
    return s + 1 + trailing_bytes_for_utf8[*s];
}

78
lib/Base/run_tests.cpp Normal file
View File

@ -0,0 +1,78 @@
void run_pre_setup_tests() {
    // #no_context: context will not be initialized at this point, so log() doesn't work
    printf("Running pre-setup tests...\n");
    printf("\nFinished running pre-setup tests...\n");
}
string string_literal_example = "Hello, I am a string literal."; // exercised by the builder tests below
void run_post_setup_tests() {
    // Manual smoke tests for logging, the string builder, file I/O, hashing
    // and the arena hash table, run after context setup.
    // NOTE(review): the paths "D:/TempLocal/junk.txt" and
    // "SignalProcessing.cpp" are machine-specific; this only works on a
    // matching setup.
    log_warning("Hello, I am a warning :), and I am a big number: %lld", (s64)2935088889357);
    log_error("Hello, I am an error - %s", "I'm another string");
    log_error("This is an error");
    printf("Running post-setup tests...\n");
    // See: main_thread_base_entry_point
    { Timed_Block_Print("string_builder_testing");
        temp_reset();
        push_allocator(temp());
        // tip: use auto_reset or auto_release with `thread_context()->arena`
        // String builder example:
        // OK. I can work with this.
        String_Builder* sb = context_builder();
        reset_string_builder(sb, true);
        append(sb, "string_literal_example");
        append(sb, " ");
        print_to_builder(sb, "There are %d cats in the %s", 64, "house.\n");
        append(sb, " > ");
        print_to_builder(sb, "some size_t: %u", (u64)3982739867);
        append(sb, "\n");
        print_to_builder(sb, "the literal: %s", string_literal_example.data);
        // string result = string_view(sb);
        string result = builder_to_string(sb); // also frees
        // print((char*)result.data);
        log("Hello.");
        log("log() should automatically append newlines to these prints.");
        log("If it doesn't, I will be very upset.");
        log(result);
        log("Hello. There are %s things here\n", string_literal_example.data);
        print("Hello, I am just a printed message to stdout\n\n");
        // Testing file writes and reads:
        print("Writing some nonsense to a file.\n");
        bool success = write_entire_file("D:/TempLocal/junk.txt", to_view(result));
        log("Done. Success: %d\n", success);
        // push_allocator(allocator(thread_context()->arena));
        push_allocator(default_allocator());
        string file_path = "SignalProcessing.cpp";
        ArrayView<u8> file_data = read_entire_file(file_path, true, true);
        log("file_data: \n");
        log("%s\n", file_data.data); // #note this is not null-terminated
    }
    { // Test hashing:
        // Results are intentionally unused; this just exercises the hash functions.
        u64 start = 0x512585;
        u32 sdbm = sdbm_hash(&start, sizeof(u64));
        u64 kh = knuth_hash(start);
        u64 fnv = fnv1a_hash(start);
        string some_data = "Hello there, my name is Musa";
        u64 result = fnv1a_hash_any(some_data.data, some_data.count);
    }
    {
        // Exercise the arena-backed hash table: insert, overwrite, look up.
        ArenaTable<u64, u64> table;
        table_init(&table);
        table_resize(&table, 2048);
        table_add(&table, (u64)52975125, (u64)5);
        table_set(&table, (u64)52975125, (u64)99);
        auto ptr = table_find_pointer(&table, (u64)52975125);
        printf("ptr.* = %llu\n", *ptr);
    }
    printf("\nFinished running post-setup tests...\n");
}

87
lib/Graphics.cpp Normal file
View File

@ -0,0 +1,87 @@
#if GRAPHICS_OPENGL
#include <GL/gl.h>
#pragma comment(lib, "opengl32")
#pragma comment(lib, "gdi32")
#endif
// #if GRAPHICS_DIRECTX11
// enum class Graphics_API: s32 {
// DirectX_11 = 0,
// OpenGL_3 = 1,
// };
// Placeholder GPU resource handles; backend-specific fields to be added later.
struct Shader {};
struct Texture {};
// One vertex as submitted to the renderer. Carries rounded-rect parameters on
// every vertex (see note below on why that is acceptable here).
struct Vertex {
Vec2 position;
Vec4 color_scale;
Vec2 uv0;
// The following values are for rendering rounded rectangles
// It's OK to put these here because for basic rendering the performance is "good enough"
Vec2 rect_center;
Vec2 rect_size;
f32 rect_corner_radius;
f32 rect_edge_softness = 1.0; // presumably 0 = hard edge, larger = softer falloff — TODO confirm in shader
};
// Initial capacity reserved for the per-thread vertex buffer (see graphics_thread_init).
constexpr s64 starting_vertex_count = 2048;
// Per-thread rendering state, owned by the Thread_Context (allocated lazily in
// graphics_thread_init, released in graphics_thread_destroy).
struct Graphics {
// Graphics_API api = Graphics_API::OpenGL_3;
Window_Info current_window;
Shader* current_shader; // currently bound shader; nullptr = none
Texture* texture_render_target; // nullptr presumably means render to the window — TODO confirm
Array<Vertex> vertices; // just init to max count.
};
void graphics_thread_destroy () {
Thread_Context* context = thread_context(); // maybe turn this into a :macro?
array_free(context->graphics->vertices);
internal_free(context->graphics);
}
void graphics_reset () {
Thread_Context* context = thread_context(); // maybe turn this into a :macro?
context->graphics->current_shader = nullptr;
}
// Lazily creates this thread's Graphics state (idempotent) and returns it.
// Allocates with the default allocator so the state survives temp-arena resets.
Graphics* graphics_thread_init () {
Thread_Context* context = thread_context();
Assert(context != nullptr);
push_allocator(default_allocator()); // NOTE(review): assumes push_allocator is scoped/RAII — confirm it is popped on return
if (context->graphics == nullptr) {
context->graphics = New<Graphics>(true); // presumably `true` = zero-initialize — TODO confirm New's flag
array_reserve(context->graphics->vertices, starting_vertex_count);
}
return context->graphics;
}
// #TODO: submit the accumulated vertex buffer to the GPU. Currently a stub.
void graphics_flush_buffer () {
//
}
// #TODO: refresh cached state for the current window. Currently a stub.
void graphics_update_current_window (Graphics* graphics) {
// graphics->current_window
}
// Directs subsequent rendering at the given window: makes sure this thread's
// Graphics state exists, then rebinds it to `window_info` with no shader and
// no texture render target.
void graphics_set_render_target (Window_Info window_info) {
    Graphics* g = graphics_thread_init();
    g->current_window = window_info;
    g->texture_render_target = nullptr;
    g->current_shader = nullptr;
    // #TODO: #Graphics graphics_update_window ::
}

2727
lib/OS/OS_Win32.cpp Normal file

File diff suppressed because it is too large Load Diff

477
lib/OS/OS_Win32.h Normal file
View File

@ -0,0 +1,477 @@
#pragma comment(lib, "user32")
#pragma comment(lib, "shell32")
#pragma comment(lib, "comctl32")
// #pragma comment(lib, "shlwapi")
#include <shellapi.h>
#include <shlobj_core.h> // SHGetFolderPathW
#include <commctrl.h> // ImageList_GetIcon
f64 GetUnixTimestamp ();
s64 GetUnixTimestampNanoseconds ();
// Outcome of waiting on a synchronization primitive (see wait_for below).
// NOTE(review): windows.h (via wingdi.h) defines ERROR as a macro, which the
// comment below acknowledges — yet the enumerator is still named ERROR. This
// may fail to compile depending on include order (NOGDI?); confirm or rename.
enum class Wait_For_Result: s32 {
SUCCESS = 0,
TIMEOUT = 1,
ERROR = 2 // can't use ERROR because of Windows.h *sigh*
};
internal void semaphore_init (Semaphore* sem, s32 initial_value = 0);
internal void semaphore_destroy (Semaphore* sem);
internal void signal (Semaphore* sem);
internal Wait_For_Result wait_for (Semaphore* sem, s32 milliseconds = -1);
internal void condition_variable_init (Condition_Variable* cv);
internal void condition_variable_destroy (Condition_Variable* cv);
internal void wait (Condition_Variable* cv, Mutex* mutex, s32 wait_time_ms = -1);
internal void wake (Condition_Variable* cv);
internal void wake_all (Condition_Variable* cv);
struct Thread; // #hack forward declares to get this to compile.
typedef s64 (*Thread_Proc)(Thread* thread);
internal bool thread_init (Thread* thread, Thread_Proc proc, string thread_name="");
internal void thread_deinit (Thread* thread, bool zero_thread=false);
internal void thread_start (Thread* thread, void* thread_data = nullptr);
internal bool thread_is_done (Thread* thread, s32 milliseconds=0);
typedef u32 OS_Error_Code;
internal string get_error_string (OS_Error_Code error_code);
internal bool file_is_valid (File file);
internal File file_open (string file_path, bool for_writing=false, bool keep_existing_content=false, bool log_errors=false);
internal void file_close (File* file);
internal bool file_read (File file, void* data, s64 bytes_to_read_count, s64* bytes_read_count=nullptr);
internal bool file_length (File file, s64* length);
internal bool file_length (string file_path, s64* length);
internal s64 file_current_position (File file);
internal bool file_set_position (File file, s64 position);
internal ArrayView<u8> read_entire_file (File file, bool add_null_terminator);
internal ArrayView<u8> read_entire_file (string file_path, bool add_null_terminator=false, bool log_errors=false);
// use to_byte_view to convert ArrayView<non-u8> to ArrayView<u8>
internal bool file_write (File* file, void* data, s64 length);
internal bool write_entire_file (string file_path, void* file_data, s64 count);
internal bool write_entire_file (string file_path, ArrayView<u8> file_data);
internal bool file_exists (string file_path);
// file_write
// write_entire_file...
// #TODO #fs File System Operations
// file_move, file_delete
// #window_creation
typedef HWND Window_Type;
typedef HICON Window_Icon;
// Window placement: position plus size (screen coordinates, presumably pixels).
struct Window_Dimensions {
s32 window_x;
s32 window_y;
s32 window_width;
s32 window_height;
};
// Per-window state: native handle plus tray-icon / minimize bookkeeping.
struct Window_Info {
Window_Type window; // HWND on Win32 (see typedef above)
Window_Dimensions initial_dimensions; // for resetting.
Window_Icon icon;
Window_Icon icon_minimized;
bool is_main_window;
bool tray_icon_added; // true once the shell notify icon has been registered
bool minimized_to_tray;
// Platform-Specific (Win32)
HMENU tray_icon_menu;
HDC hdc;
// Likely will only be used for main window:
NOTIFYICONDATAW nid;
};
// #move: Monitor - platform-independent hardware monitor information:
// Bounds are edge coordinates (left/top/right/bottom), not x/y/w/h.
struct Monitor {
s64 left;
s64 top;
s64 right;
s64 bottom;
bool primary; // true for the primary display
bool present; // presumably false for unused slots in a fixed-size list — TODO confirm
#if OS_WINDOWS
MONITORINFO monitor_info; // raw Win32 data this entry was built from
#endif
};
// Drive categories; numeric values match Win32 GetDriveType()'s
// DRIVE_UNKNOWN .. DRIVE_RAMDISK constants (0..6).
enum class Win32_Drive_Type: s32 {
Unknown = 0,
No_Root_Dir = 1,
Removable = 2,
Fixed = 3,
Remote = 4,
cdrom = 5,
ramdisk = 6
};
// Known file systems. Values are grouped by family (Ext* in the 60s, Windows
// at 128+, Apple at 256, legacy FAT variants negative); only a few are ever
// produced by Win32_filesystem_from_string below.
enum class File_System: s32 {
Unknown = 0,
MusaFS = 1,
exFAT = 2,
Ext2 = 65,
Ext3 = 66,
Ext4 = 67,
Btrfs = 79,
XFS = 86,
ZFS = 91,
NTFS = 128, // Windows
ReFS = 130, // Windows
AFS = 256, // Apple File System
F2FS = 1024,
// Legacy systems:
FAT32 = -1,
// FAT16 :: -2;
// FAT12 :: -3;
};
// Human-readable name for a Win32_Drive_Type (spelled like the enumerator).
// Asserts (and falls back to "Unknown") on out-of-range values.
string to_string (Win32_Drive_Type type) {
    switch (type) {
        case Win32_Drive_Type::Unknown:     return "Unknown";
        case Win32_Drive_Type::No_Root_Dir: return "No_Root_Dir";
        case Win32_Drive_Type::Removable:   return "Removable";
        case Win32_Drive_Type::Fixed:       return "Fixed";
        case Win32_Drive_Type::Remote:      return "Remote";
        case Win32_Drive_Type::cdrom:       return "cdrom";
        case Win32_Drive_Type::ramdisk:     return "ramdisk";
    }
    Assert(false); // unreachable for valid enum values
    return "Unknown";
}
// Human-readable name for a File_System (spelled like the enumerator).
// Asserts (and falls back to "Unknown") on out-of-range values.
string to_string (File_System fs) {
    switch (fs) {
        case File_System::Unknown: return "Unknown";
        case File_System::MusaFS:  return "MusaFS";
        case File_System::exFAT:   return "exFAT";
        case File_System::Ext2:    return "Ext2";
        case File_System::Ext3:    return "Ext3";
        case File_System::Ext4:    return "Ext4";
        case File_System::Btrfs:   return "Btrfs";
        case File_System::XFS:     return "XFS";
        case File_System::ZFS:     return "ZFS";
        case File_System::NTFS:    return "NTFS";
        case File_System::ReFS:    return "ReFS";
        case File_System::AFS:     return "AFS";
        case File_System::F2FS:    return "F2FS";
        case File_System::FAT32:   return "FAT32";
    }
    Assert(false); // unreachable for valid enum values
    return "Unknown";
}
// Parses a file-system name (case-insensitive), as reported by e.g.
// GetVolumeInformation, into a File_System value.
// Recognizes NTFS, ReFS, FAT/FAT32 and exFAT; asserts on anything else and
// returns Unknown in release builds.
// #fix: Windows reports FAT32 volumes as "FAT32" (not "FAT"), which previously
// fell through to the Assert; accept both spellings.
File_System Win32_filesystem_from_string (string s) {
    string s_copy = to_lower_copy(s);
    if (s_copy == "ntfs")  { return File_System::NTFS; }
    if (s_copy == "refs")  { return File_System::ReFS; }
    if (s_copy == "fat")   { return File_System::FAT32; }
    if (s_copy == "fat32") { return File_System::FAT32; }
    if (s_copy == "exfat") { return File_System::exFAT; }
    Assert(false);
    return File_System::Unknown;
}
// Snapshot of one logical drive as reported by the Win32 volume APIs
// (GetDriveType / GetVolumeInformation style data — confirm at fill site).
struct Win32_Drive {
string label; // presumably the drive letter / root path — TODO confirm vs volume_name
string volume_name;
Win32_Drive_Type type;
File_System file_system;
s64 full_size;
s64 free_space;
u32 serial_number;
u32 max_component_length;
u32 file_system_flags;
bool is_present;
};
// internal void serialize_win32_drive (Serializer* s, Win32_Drive* drive);
// internal void deserialize_win32_drive (Deserializer* s, Win32_Drive* drive);
internal Win32_Drive* copy_win32_drive (Win32_Drive* drive);
// An icon extracted for a path: dimensions plus raw bitmap bytes
// (pixel format not specified here — see the extraction site).
struct Win32_Icon {
string path; // source path the icon was taken from
s32 width;
s32 height;
ArrayView<u8> bitmap;
};
typedef Win32_Icon Icon;
typedef Win32_Drive OS_Drive;
internal void os_log_error ();
bool os_window_is_minimized (Window_Type window);
bool os_main_window_is_minimized ();
bool os_create_window (string new_window_name, Window_Type parent=nullptr, bool center_window=true, bool open_on_largest_monitor=true, bool display_window=true, void* wnd_proc_override=nullptr);
Window_Info get_main_window ();
Window_Info* get_main_window_pointer ();
string os_get_machine_name ();
// #NTFS#MFT
// Reference: https://handmade.network/forums/articles/t/7002-tutorial_parsing_the_mft
// The structures below are read straight off the volume, so they must match
// the NTFS on-disk layout byte-for-byte: pack to 1-byte alignment.
#pragma pack(push,1)
// NTFS volume boot sector ($Boot): geometry plus the location of the MFT.
struct NTFS_BootSector {
u8 jump[3];
u8 name[8];
u16 bytesPerSector;
u8 sectorsPerCluster;
u16 reservedSectors;
u8 unused0[3];
u16 unused1;
u8 media;
u16 unused2;
u16 sectorsPerTrack;
u16 headsPerCylinder;
u32 hiddenSectors;
u32 unused3;
u32 unused4;
u64 totalSectors;
u64 mftStart; // first cluster of the $MFT
u64 mftMirrorStart; // first cluster of the $MFTMirr copy
u32 clustersPerFileRecord;
u32 clustersPerIndexBlock;
u64 serialNumber;
u32 checksum;
u8 bootloader[426];
u16 bootSignature;
};
// Header of one MFT FILE record (one record per file/directory), parsed in
// place from the raw MFT buffer.
struct NTFS_FileRecordHeader { // https://flatcap.github.io/linux-ntfs/ntfs/concepts/file_record.html
u32 magic; // presumably the 'FILE' signature — TODO confirm the check at the parse site
u16 updateSequenceOffset; // Offset to the Update Sequence
u16 updateSequenceSize; // Size in words of Update Sequence (S)
u64 logSequence; // $LogFile Sequence Number (LSN): This is changed every time the record is modified.
u16 sequenceNumber; // Number of times this mft record has been reused. N.B. The increment (skipping zero) is done when the file is deleted. N.B. If this is set to zero it is left as zero.
u16 hardLinkCount; // Number of hard links, i.e. the number of directory entries referencing this record.
u16 firstAttributeOffset; // Offset to the first attribute
u16 inUse : 1; // 0x01
u16 isDirectory : 1; // 0x02
// 0x04 record is an extension (set for records in the $Extend directory)
// 0x08 special index present (set for non-directory records containing an index: $Secure, $ObjID, $Quota, $Reparse)
u32 usedSize; // Real size is a count of how many bytes of a record are actually used.
u32 allocatedSize; // Allocated size the file record takes on disk (multiple of cluster size).
u64 fileReference; // Base MFT record - File reference to the base FILE record
// When it is not zero, it is an MFT reference pointing to the base MFT record to which this record belongs. The Base Record contains information about the Extension Record. This information is stored in an ATTRIBUTE_LIST attribute.
u16 nextAttributeID; // The Attribute Id that will be assigned to the next Attribute added to this MFT Record. (Incremented each time it is used, Every time the MFT Record is reused this Id is set to zero, The first instance number is always 0.)
u16 unused; // [added in XP] Align to 4 byte boundary
u32 recordNumber; // [added in XP] Number of this MFT Record
};
// Common header shared by every attribute inside a FILE record.
struct NTFS_AttributeHeader {
u32 attributeType; // see https://flatcap.github.io/linux-ntfs/ntfs/attributes/index.html
u32 length;
u8 nonResident; // 0 = value stored inside the record; 1 = stored externally via data runs
u8 nameLength;
u16 nameOffset;
u16 flags;
u16 attributeID;
};
// Header for attributes whose value lives inside the FILE record itself.
struct NTFS_ResidentAttributeHeader : NTFS_AttributeHeader {
u32 attributeLength;
u16 attributeOffset;
u8 indexed;
u8 unused;
};
// $FILE_NAME attribute: names the file and links it to its parent directory.
struct NTFS_FileNameAttributeHeader : NTFS_ResidentAttributeHeader {
//u64 fileReferenceNumber; // A reference consists of a 48-bit index into the mft and a 16-bit sequence number used to detect stale references.
u64 parentRecordNumber : 48; // low 48 bits
u64 sequenceNumber : 16;
u64 creationTime;
u64 modificationTime;
u64 metadataModificationTime;
u64 readTime;
u64 allocatedSize; // This information is not reliable - use the $DATA attribute's unnamed stream for the correct file sizes.
u64 realSize; // This information is not reliable - use the $DATA attribute's unnamed stream for the correct file sizes.
u32 flags;
u32 repase; // NOTE(review): likely a typo for "reparse"; left as-is since renaming would break existing uses
u8 fileNameLength;
u8 namespaceType;
u16 fileName[1]; // variable-length name follows in memory (flexible-array idiom, presumably UTF-16)
};
// $STANDARD_INFORMATION attribute: timestamps plus DOS permission bits
// (consumed by add_file_permissions below).
struct NTFS_FileStandardInformationHeader : NTFS_AttributeHeader {
// source: https://flatcap.github.io/linux-ntfs/ntfs/attributes/standard_information.html
u64 creationTime; // file creation time
u64 modificationTime; // changed when file contents are modified.
u64 metadataModificationTime; // changed when MFT record is modified.
u64 readTime; // last read
u32 filePermissions; // DOS file permissions.
u32 maximumNumberOfVersions;
u32 versionNumber;
u32 classID; // [added in Windows 2000]
u32 ownerID; // [added in Windows 2000]
u32 securityID; // [added in Windows 2000]
u64 quotaCharged; // [added in Windows 2000]
u64 updateSequenceNumber; // [added in Windows 2000]
};
// Header for attributes whose value is stored outside the record, located via
// data runs (see NTFS_RunHeader below).
struct NTFS_NonResidentAttributeHeader : NTFS_AttributeHeader {
// source: https://flatcap.github.io/linux-ntfs/ntfs/concepts/attribute_header.html
// see: Non-Resident, No Name, and Non-Resident, Named
u64 firstCluster; // Starting VCN
u64 lastCluster; // Last VCN
u16 dataRunsOffset; // Offset to the data runs. If not named, this is always 0x40.
u16 compressionUnit; // Compression unit size (if file is compressed), 0 implies uncompressed
u32 unused; // Padding.
u64 attributeAllocated; // allocated size of the attribute (NOT THE FILE) This is the attribute size rounded up to the cluster size.
u64 attributeSize; // Logical "real" size of the attribute (NOT THE FILE)
u64 streamDataSize; // Initialized data size of the stream (Compressed data size / actual valid bytes)
};
// Leading byte of each data run: the two nibbles give the byte widths of the
// run-length and run-offset fields that follow.
struct NTFS_RunHeader {
u8 lengthFieldBytes : 4;
u8 offsetFieldBytes : 4;
};
#pragma pack(pop) // end of on-disk (packed) structures
// Working state for enumerating one drive's MFT.
struct NTFS_MFT_Internal {
ArrayView<u8> mft_file; // scratch buffer sized for a single FILE record
ArrayView<u8> mft_buffer; // bulk buffer holding many records per read
HANDLE handle; // presumably the open volume handle — TODO confirm where it is opened
s64 bytes_accessed;
s64 object_count;
string drive_label;
};
constexpr s64 NTFS_MFT_File_Record_Size = 1024; // File Entry Block
constexpr s64 NTFS_MFT_Files_Per_Buffer = 65536;
// Allocates a zeroed NTFS_MFT_Internal plus its two buffers using the current
// allocator (intended for the temp allocator, per the comment below).
NTFS_MFT_Internal* new_ntfs_mft_internal () { // call with temp
NTFS_MFT_Internal* mft = New<NTFS_MFT_Internal>(true);
mft->mft_file = ArrayView<u8>(NTFS_MFT_File_Record_Size);
mft->mft_buffer = ArrayView<u8>(NTFS_MFT_File_Record_Size * NTFS_MFT_Files_Per_Buffer); // 64 MB
return mft;
}
// Flag set stored on NTFS_File. The low three bits come from the attribute
// header's flags; the remaining bits re-encode DOS file-permission bits (note:
// this enum uses its own bit positions, not the raw DOS values).
enum class NTFS_File_Flags: u16 {
is_compressed = 0x0001, // (attribute->flags)
is_encrypted = 0x0002, // (attribute->flags)
is_sparse = 0x0004, // (attribute->flags)
// [DOS File Permissions] fileInfoAttribute->filePermissions
read_only_file = 0x0008,
hidden = 0x0010,
system = 0x0020,
archive = 0x0040,
device = 0x0080,
normal = 0x0100,
temporary = 0x0200,
sparse = 0x0400, // redundant
reparse_point = 0x0800,
compressed = 0x1000, // redundant
offline = 0x2000,
not_content_indexed = 0x4000,
encrypted = 0x8000, // redundant
};
// Bitwise operators so NTFS_File_Flags composes as a type-safe flag set.
force_inline NTFS_File_Flags operator | (NTFS_File_Flags a, NTFS_File_Flags b) {
    return static_cast<NTFS_File_Flags>(static_cast<u16>(a) | static_cast<u16>(b));
}
force_inline NTFS_File_Flags operator & (NTFS_File_Flags a, NTFS_File_Flags b) {
    return static_cast<NTFS_File_Flags>(static_cast<u16>(a) & static_cast<u16>(b));
}
force_inline NTFS_File_Flags& operator |= (NTFS_File_Flags& a, NTFS_File_Flags b) {
    a = a | b;
    return a;
}
force_inline NTFS_File_Flags operator ~ (NTFS_File_Flags a) {
    return static_cast<NTFS_File_Flags>(~static_cast<u16>(a));
}
// #TODO: I probably don't need this struct at all
// Lightweight record for one file/directory pulled out of the MFT.
struct NTFS_File {
u64 parent_id; // presumably the parent directory's MFT record number — TODO confirm at fill site
u64 record_id; // presumably this file's MFT record number — TODO confirm at fill site
u16* name_data; // name characters (not owned here; lifetime managed by the parser, presumably)
u64 file_modtime; // FILETIME
u64 file_size;
u8 name_count; // length of name_data in u16 units (presumably — matches fileNameLength)
bool is_directory;
NTFS_File_Flags flags;
};
// Maps DOS file-permission bits (from $STANDARD_INFORMATION's filePermissions)
// onto file->flags, translating each bit into this codebase's
// NTFS_File_Flags encoding.
// #bugfix: these tests were written `permissions & MASK == MASK`. C++ gives
// `==` higher precedence than `&`, so that parses as
// `permissions & (MASK == MASK)` == `permissions & 1` — every branch was
// actually testing the 0x0001 (read-only) bit.
force_inline void add_file_permissions (NTFS_File* file, u32 permissions) {
    if (permissions & 0x0001) { file->flags |= NTFS_File_Flags::read_only_file; }
    if (permissions & 0x0002) { file->flags |= NTFS_File_Flags::hidden; }
    if (permissions & 0x0004) { file->flags |= NTFS_File_Flags::system; }
    if (permissions & 0x0020) { file->flags |= NTFS_File_Flags::archive; }
    if (permissions & 0x0040) { file->flags |= NTFS_File_Flags::device; }
    if (permissions & 0x0080) { file->flags |= NTFS_File_Flags::normal; }
    if (permissions & 0x0100) { file->flags |= NTFS_File_Flags::temporary; }
    if (permissions & 0x0200) { file->flags |= NTFS_File_Flags::sparse; }
    if (permissions & 0x0400) { file->flags |= NTFS_File_Flags::reparse_point; }
    if (permissions & 0x0800) { file->flags |= NTFS_File_Flags::compressed; }
    if (permissions & 0x1000) { file->flags |= NTFS_File_Flags::offline; }
    if (permissions & 0x2000) { file->flags |= NTFS_File_Flags::not_content_indexed; }
    if (permissions & 0x4000) { file->flags |= NTFS_File_Flags::encrypted; }
}
// Maps NTFS attribute-header flag bits onto file->flags.
// #bugfix: `attribute_flags & MASK == MASK` parses as
// `attribute_flags & (MASK == MASK)` == `attribute_flags & 1` because `==`
// binds tighter than `&`, so all three branches were testing bit 0.
force_inline void add_flags (NTFS_File* file, u16 attribute_flags) {
    if (attribute_flags & 0x0001) { file->flags |= NTFS_File_Flags::is_compressed; }
    if (attribute_flags & 0x4000) { file->flags |= NTFS_File_Flags::is_encrypted; }
    if (attribute_flags & 0x8000) { file->flags |= NTFS_File_Flags::is_sparse; }
}
// #NTFS#MFT #API
// bool ntfs_mft_initialize (); // sets up thread group
// bool ntfs_mft_enumerate_drive (Win32_Drive* drive);
// bool ntfs_mft_enumeration_is_done ();
// void ntfs_mft_finish ();
// #NTFS#MFT#USNJrnl
// struct NTFS_USN_Journal {
// bool no_permission;
// HANDLE hVol;
// ArrayView<USN_Journal_Change> changes;
// };

15
lib/UI/Layout.h Normal file
View File

@ -0,0 +1,15 @@
// Axis-aligned rectangle: origin (x, y) plus width/height.
struct Rect {
f32 x; f32 y; f32 w; f32 h;
};
// Exact component-wise equality. Fine for rects copied verbatim; beware when
// comparing computed float values.
force_inline bool operator==(const Rect& a, const Rect& b) {
return a.x == b.x && a.y == b.y && a.w == b.w && a.h == b.h;
}
// Builds a Rect from integer coordinates (explicit casts to f32).
Rect make_rect_int (s64 x, s64 y, s64 w, s64 h) {
return {(f32)x, (f32)y, (f32)w, (f32)h};
}
// Builds a Rect from float coordinates.
Rect make_rect (f32 x, f32 y, f32 w, f32 h) {
return {x, y, w, h};
}

View File

@ -0,0 +1,253 @@
//
// Copyright (C) 2016 David Carpenter
//
// Permission is hereby granted, free of charge,
// to any person obtaining a copy of this software
// and associated documentation files (the "Software"),
// to deal in the Software without restriction,
// including without limitation the rights to use,
// copy, modify, merge, publish, distribute, sublicense,
// and/or sell copies of the Software, and to permit
// persons to whom the Software is furnished to do so,
// subject to the following conditions:
//
// The above copyright notice and this permission notice shall be
// included in all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
// EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
// OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
// IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
// DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
// TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
// SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
//
#ifndef _EVERYTHING_DLL_
#define _EVERYTHING_DLL_
#ifndef _INC_WINDOWS
#include <windows.h>
#endif
#ifdef __cplusplus
extern "C" {
#endif
// if not defined, version is 1.
#define EVERYTHING_SDK_VERSION 2
#define EVERYTHING_OK 0 // no error detected
#define EVERYTHING_ERROR_MEMORY 1 // out of memory.
#define EVERYTHING_ERROR_IPC 2 // Everything search client is not running
#define EVERYTHING_ERROR_REGISTERCLASSEX 3 // unable to register window class.
#define EVERYTHING_ERROR_CREATEWINDOW 4 // unable to create listening window
#define EVERYTHING_ERROR_CREATETHREAD 5 // unable to create listening thread
#define EVERYTHING_ERROR_INVALIDINDEX 6 // invalid index
#define EVERYTHING_ERROR_INVALIDCALL 7 // invalid call
#define EVERYTHING_ERROR_INVALIDREQUEST 8 // invalid request data, request data first.
#define EVERYTHING_ERROR_INVALIDPARAMETER 9 // bad parameter.
#define EVERYTHING_SORT_NAME_ASCENDING 1
#define EVERYTHING_SORT_NAME_DESCENDING 2
#define EVERYTHING_SORT_PATH_ASCENDING 3
#define EVERYTHING_SORT_PATH_DESCENDING 4
#define EVERYTHING_SORT_SIZE_ASCENDING 5
#define EVERYTHING_SORT_SIZE_DESCENDING 6
#define EVERYTHING_SORT_EXTENSION_ASCENDING 7
#define EVERYTHING_SORT_EXTENSION_DESCENDING 8
#define EVERYTHING_SORT_TYPE_NAME_ASCENDING 9
#define EVERYTHING_SORT_TYPE_NAME_DESCENDING 10
#define EVERYTHING_SORT_DATE_CREATED_ASCENDING 11
#define EVERYTHING_SORT_DATE_CREATED_DESCENDING 12
#define EVERYTHING_SORT_DATE_MODIFIED_ASCENDING 13
#define EVERYTHING_SORT_DATE_MODIFIED_DESCENDING 14
#define EVERYTHING_SORT_ATTRIBUTES_ASCENDING 15
#define EVERYTHING_SORT_ATTRIBUTES_DESCENDING 16
#define EVERYTHING_SORT_FILE_LIST_FILENAME_ASCENDING 17
#define EVERYTHING_SORT_FILE_LIST_FILENAME_DESCENDING 18
#define EVERYTHING_SORT_RUN_COUNT_ASCENDING 19
#define EVERYTHING_SORT_RUN_COUNT_DESCENDING 20
#define EVERYTHING_SORT_DATE_RECENTLY_CHANGED_ASCENDING 21
#define EVERYTHING_SORT_DATE_RECENTLY_CHANGED_DESCENDING 22
#define EVERYTHING_SORT_DATE_ACCESSED_ASCENDING 23
#define EVERYTHING_SORT_DATE_ACCESSED_DESCENDING 24
#define EVERYTHING_SORT_DATE_RUN_ASCENDING 25
#define EVERYTHING_SORT_DATE_RUN_DESCENDING 26
#define EVERYTHING_REQUEST_FILE_NAME 0x00000001
#define EVERYTHING_REQUEST_PATH 0x00000002
#define EVERYTHING_REQUEST_FULL_PATH_AND_FILE_NAME 0x00000004
#define EVERYTHING_REQUEST_EXTENSION 0x00000008
#define EVERYTHING_REQUEST_SIZE 0x00000010
#define EVERYTHING_REQUEST_DATE_CREATED 0x00000020
#define EVERYTHING_REQUEST_DATE_MODIFIED 0x00000040
#define EVERYTHING_REQUEST_DATE_ACCESSED 0x00000080
#define EVERYTHING_REQUEST_ATTRIBUTES 0x00000100
#define EVERYTHING_REQUEST_FILE_LIST_FILE_NAME 0x00000200
#define EVERYTHING_REQUEST_RUN_COUNT 0x00000400
#define EVERYTHING_REQUEST_DATE_RUN 0x00000800
#define EVERYTHING_REQUEST_DATE_RECENTLY_CHANGED 0x00001000
#define EVERYTHING_REQUEST_HIGHLIGHTED_FILE_NAME 0x00002000
#define EVERYTHING_REQUEST_HIGHLIGHTED_PATH 0x00004000
#define EVERYTHING_REQUEST_HIGHLIGHTED_FULL_PATH_AND_FILE_NAME 0x00008000
#define EVERYTHING_TARGET_MACHINE_X86 1
#define EVERYTHING_TARGET_MACHINE_X64 2
#define EVERYTHING_TARGET_MACHINE_ARM 3
#ifndef EVERYTHINGAPI
#define EVERYTHINGAPI __stdcall
#endif
#ifndef EVERYTHINGUSERAPI
#define EVERYTHINGUSERAPI __declspec(dllimport)
#endif
// write search state
EVERYTHINGUSERAPI void EVERYTHINGAPI Everything_SetSearchW(LPCWSTR lpString);
EVERYTHINGUSERAPI void EVERYTHINGAPI Everything_SetSearchA(LPCSTR lpString);
EVERYTHINGUSERAPI void EVERYTHINGAPI Everything_SetMatchPath(BOOL bEnable);
EVERYTHINGUSERAPI void EVERYTHINGAPI Everything_SetMatchCase(BOOL bEnable);
EVERYTHINGUSERAPI void EVERYTHINGAPI Everything_SetMatchWholeWord(BOOL bEnable);
EVERYTHINGUSERAPI void EVERYTHINGAPI Everything_SetRegex(BOOL bEnable);
EVERYTHINGUSERAPI void EVERYTHINGAPI Everything_SetMax(DWORD dwMax);
EVERYTHINGUSERAPI void EVERYTHINGAPI Everything_SetOffset(DWORD dwOffset);
EVERYTHINGUSERAPI void EVERYTHINGAPI Everything_SetReplyWindow(HWND hWnd);
EVERYTHINGUSERAPI void EVERYTHINGAPI Everything_SetReplyID(DWORD dwId);
EVERYTHINGUSERAPI void EVERYTHINGAPI Everything_SetSort(DWORD dwSort); // Everything 1.4.1
EVERYTHINGUSERAPI void EVERYTHINGAPI Everything_SetRequestFlags(DWORD dwRequestFlags); // Everything 1.4.1
// read search state
EVERYTHINGUSERAPI BOOL EVERYTHINGAPI Everything_GetMatchPath(void);
EVERYTHINGUSERAPI BOOL EVERYTHINGAPI Everything_GetMatchCase(void);
EVERYTHINGUSERAPI BOOL EVERYTHINGAPI Everything_GetMatchWholeWord(void);
EVERYTHINGUSERAPI BOOL EVERYTHINGAPI Everything_GetRegex(void);
EVERYTHINGUSERAPI DWORD EVERYTHINGAPI Everything_GetMax(void);
EVERYTHINGUSERAPI DWORD EVERYTHINGAPI Everything_GetOffset(void);
EVERYTHINGUSERAPI LPCSTR EVERYTHINGAPI Everything_GetSearchA(void);
EVERYTHINGUSERAPI LPCWSTR EVERYTHINGAPI Everything_GetSearchW(void);
EVERYTHINGUSERAPI DWORD EVERYTHINGAPI Everything_GetLastError(void);
EVERYTHINGUSERAPI HWND EVERYTHINGAPI Everything_GetReplyWindow(void);
EVERYTHINGUSERAPI DWORD EVERYTHINGAPI Everything_GetReplyID(void);
EVERYTHINGUSERAPI DWORD EVERYTHINGAPI Everything_GetSort(void); // Everything 1.4.1
EVERYTHINGUSERAPI DWORD EVERYTHINGAPI Everything_GetRequestFlags(void); // Everything 1.4.1
// execute query
EVERYTHINGUSERAPI BOOL EVERYTHINGAPI Everything_QueryA(BOOL bWait);
EVERYTHINGUSERAPI BOOL EVERYTHINGAPI Everything_QueryW(BOOL bWait);
// query reply
EVERYTHINGUSERAPI BOOL EVERYTHINGAPI Everything_IsQueryReply(UINT message,WPARAM wParam,LPARAM lParam,DWORD dwId);
// write result state
EVERYTHINGUSERAPI void EVERYTHINGAPI Everything_SortResultsByPath(void);
// read result state
EVERYTHINGUSERAPI DWORD EVERYTHINGAPI Everything_GetNumFileResults(void);
EVERYTHINGUSERAPI DWORD EVERYTHINGAPI Everything_GetNumFolderResults(void);
EVERYTHINGUSERAPI DWORD EVERYTHINGAPI Everything_GetNumResults(void);
EVERYTHINGUSERAPI DWORD EVERYTHINGAPI Everything_GetTotFileResults(void);
EVERYTHINGUSERAPI DWORD EVERYTHINGAPI Everything_GetTotFolderResults(void);
EVERYTHINGUSERAPI DWORD EVERYTHINGAPI Everything_GetTotResults(void);
EVERYTHINGUSERAPI BOOL EVERYTHINGAPI Everything_IsVolumeResult(DWORD dwIndex);
EVERYTHINGUSERAPI BOOL EVERYTHINGAPI Everything_IsFolderResult(DWORD dwIndex);
EVERYTHINGUSERAPI BOOL EVERYTHINGAPI Everything_IsFileResult(DWORD dwIndex);
EVERYTHINGUSERAPI LPCWSTR EVERYTHINGAPI Everything_GetResultFileNameW(DWORD dwIndex);
EVERYTHINGUSERAPI LPCSTR EVERYTHINGAPI Everything_GetResultFileNameA(DWORD dwIndex);
EVERYTHINGUSERAPI LPCWSTR EVERYTHINGAPI Everything_GetResultPathW(DWORD dwIndex);
EVERYTHINGUSERAPI LPCSTR EVERYTHINGAPI Everything_GetResultPathA(DWORD dwIndex);
EVERYTHINGUSERAPI DWORD EVERYTHINGAPI Everything_GetResultFullPathNameA(DWORD dwIndex,LPSTR buf,DWORD bufsize);
EVERYTHINGUSERAPI DWORD EVERYTHINGAPI Everything_GetResultFullPathNameW(DWORD dwIndex,LPWSTR wbuf,DWORD wbuf_size_in_wchars);
EVERYTHINGUSERAPI DWORD EVERYTHINGAPI Everything_GetResultListSort(void); // Everything 1.4.1
EVERYTHINGUSERAPI DWORD EVERYTHINGAPI Everything_GetResultListRequestFlags(void); // Everything 1.4.1
EVERYTHINGUSERAPI LPCWSTR EVERYTHINGAPI Everything_GetResultExtensionW(DWORD dwIndex); // Everything 1.4.1
EVERYTHINGUSERAPI LPCSTR EVERYTHINGAPI Everything_GetResultExtensionA(DWORD dwIndex); // Everything 1.4.1
EVERYTHINGUSERAPI BOOL EVERYTHINGAPI Everything_GetResultSize(DWORD dwIndex,LARGE_INTEGER *lpSize); // Everything 1.4.1
EVERYTHINGUSERAPI BOOL EVERYTHINGAPI Everything_GetResultDateCreated(DWORD dwIndex,FILETIME *lpDateCreated); // Everything 1.4.1
EVERYTHINGUSERAPI BOOL EVERYTHINGAPI Everything_GetResultDateModified(DWORD dwIndex,FILETIME *lpDateModified); // Everything 1.4.1
EVERYTHINGUSERAPI BOOL EVERYTHINGAPI Everything_GetResultDateAccessed(DWORD dwIndex,FILETIME *lpDateAccessed); // Everything 1.4.1
EVERYTHINGUSERAPI DWORD EVERYTHINGAPI Everything_GetResultAttributes(DWORD dwIndex); // Everything 1.4.1
EVERYTHINGUSERAPI LPCWSTR EVERYTHINGAPI Everything_GetResultFileListFileNameW(DWORD dwIndex); // Everything 1.4.1
EVERYTHINGUSERAPI LPCSTR EVERYTHINGAPI Everything_GetResultFileListFileNameA(DWORD dwIndex); // Everything 1.4.1
EVERYTHINGUSERAPI DWORD EVERYTHINGAPI Everything_GetResultRunCount(DWORD dwIndex); // Everything 1.4.1
EVERYTHINGUSERAPI BOOL EVERYTHINGAPI Everything_GetResultDateRun(DWORD dwIndex,FILETIME *lpDateRun);
EVERYTHINGUSERAPI BOOL EVERYTHINGAPI Everything_GetResultDateRecentlyChanged(DWORD dwIndex,FILETIME *lpDateRecentlyChanged);
EVERYTHINGUSERAPI LPCWSTR EVERYTHINGAPI Everything_GetResultHighlightedFileNameW(DWORD dwIndex); // Everything 1.4.1
EVERYTHINGUSERAPI LPCSTR EVERYTHINGAPI Everything_GetResultHighlightedFileNameA(DWORD dwIndex); // Everything 1.4.1
EVERYTHINGUSERAPI LPCWSTR EVERYTHINGAPI Everything_GetResultHighlightedPathW(DWORD dwIndex); // Everything 1.4.1
EVERYTHINGUSERAPI LPCSTR EVERYTHINGAPI Everything_GetResultHighlightedPathA(DWORD dwIndex); // Everything 1.4.1
EVERYTHINGUSERAPI LPCWSTR EVERYTHINGAPI Everything_GetResultHighlightedFullPathAndFileNameW(DWORD dwIndex); // Everything 1.4.1
EVERYTHINGUSERAPI LPCSTR EVERYTHINGAPI Everything_GetResultHighlightedFullPathAndFileNameA(DWORD dwIndex); // Everything 1.4.1
// reset state and free any allocated memory
EVERYTHINGUSERAPI void EVERYTHINGAPI Everything_Reset(void);
EVERYTHINGUSERAPI void EVERYTHINGAPI Everything_CleanUp(void);
EVERYTHINGUSERAPI DWORD EVERYTHINGAPI Everything_GetMajorVersion(void);
EVERYTHINGUSERAPI DWORD EVERYTHINGAPI Everything_GetMinorVersion(void);
EVERYTHINGUSERAPI DWORD EVERYTHINGAPI Everything_GetRevision(void);
EVERYTHINGUSERAPI DWORD EVERYTHINGAPI Everything_GetBuildNumber(void);
EVERYTHINGUSERAPI BOOL EVERYTHINGAPI Everything_Exit(void);
UINT EVERYTHINGAPI Everything_MSIExitAndStopService(void *msihandle);
UINT EVERYTHINGAPI Everything_MSIStartService(void *msihandle);
EVERYTHINGUSERAPI BOOL EVERYTHINGAPI Everything_IsDBLoaded(void); // Everything 1.4.1
EVERYTHINGUSERAPI BOOL EVERYTHINGAPI Everything_IsAdmin(void); // Everything 1.4.1
EVERYTHINGUSERAPI BOOL EVERYTHINGAPI Everything_IsAppData(void); // Everything 1.4.1
EVERYTHINGUSERAPI BOOL EVERYTHINGAPI Everything_RebuildDB(void); // Everything 1.4.1
EVERYTHINGUSERAPI BOOL EVERYTHINGAPI Everything_UpdateAllFolderIndexes(void); // Everything 1.4.1
EVERYTHINGUSERAPI BOOL EVERYTHINGAPI Everything_SaveDB(void); // Everything 1.4.1
EVERYTHINGUSERAPI BOOL EVERYTHINGAPI Everything_SaveRunHistory(void); // Everything 1.4.1
EVERYTHINGUSERAPI BOOL EVERYTHINGAPI Everything_DeleteRunHistory(void); // Everything 1.4.1
EVERYTHINGUSERAPI DWORD EVERYTHINGAPI Everything_GetTargetMachine(void); // Everything 1.4.1
EVERYTHINGUSERAPI BOOL EVERYTHINGAPI Everything_IsFastSort(DWORD sortType); // Everything 1.4.1.859
EVERYTHINGUSERAPI BOOL EVERYTHINGAPI Everything_IsFileInfoIndexed(DWORD fileInfoType); // Everything 1.4.1.859
EVERYTHINGUSERAPI DWORD EVERYTHINGAPI Everything_GetRunCountFromFileNameW(LPCWSTR lpFileName); // Everything 1.4.1
EVERYTHINGUSERAPI DWORD EVERYTHINGAPI Everything_GetRunCountFromFileNameA(LPCSTR lpFileName); // Everything 1.4.1
EVERYTHINGUSERAPI BOOL EVERYTHINGAPI Everything_SetRunCountFromFileNameW(LPCWSTR lpFileName,DWORD dwRunCount); // Everything 1.4.1
EVERYTHINGUSERAPI BOOL EVERYTHINGAPI Everything_SetRunCountFromFileNameA(LPCSTR lpFileName,DWORD dwRunCount); // Everything 1.4.1
EVERYTHINGUSERAPI DWORD EVERYTHINGAPI Everything_IncRunCountFromFileNameW(LPCWSTR lpFileName); // Everything 1.4.1
// Increments the run count for the given file (ANSI path) and returns the new count. Available since Everything 1.4.1.
EVERYTHINGUSERAPI DWORD EVERYTHINGAPI Everything_IncRunCountFromFileNameA(LPCSTR lpFileName); // Everything 1.4.1
// Map the character-set-neutral (TCHAR-style) SDK names onto either the
// wide-character (...W) or ANSI (...A) exports, following the standard Win32
// UNICODE convention: callers write e.g. Everything_SetSearch() and get the
// variant matching their project's character set.
#ifdef UNICODE
// Unicode build: generic names resolve to the wide-character (...W) functions.
#define Everything_SetSearch Everything_SetSearchW
#define Everything_GetSearch Everything_GetSearchW
#define Everything_Query Everything_QueryW
#define Everything_Query2 Everything_Query2W
#define Everything_GetResultFileName Everything_GetResultFileNameW
#define Everything_GetResultPath Everything_GetResultPathW
#define Everything_GetResultFullPathName Everything_GetResultFullPathNameW
#define Everything_GetResultExtension Everything_GetResultExtensionW
#define Everything_GetResultFileListFileName Everything_GetResultFileListFileNameW
#define Everything_GetResultHighlightedFileName Everything_GetResultHighlightedFileNameW
#define Everything_GetResultHighlightedPath Everything_GetResultHighlightedPathW
#define Everything_GetResultHighlightedFullPathAndFileName Everything_GetResultHighlightedFullPathAndFileNameW
#define Everything_GetRunCountFromFileName Everything_GetRunCountFromFileNameW
#define Everything_SetRunCountFromFileName Everything_SetRunCountFromFileNameW
#define Everything_IncRunCountFromFileName Everything_IncRunCountFromFileNameW
#else
// ANSI (multi-byte) build: generic names resolve to the ...A functions.
#define Everything_SetSearch Everything_SetSearchA
#define Everything_GetSearch Everything_GetSearchA
#define Everything_Query Everything_QueryA
#define Everything_Query2 Everything_Query2A
#define Everything_GetResultFileName Everything_GetResultFileNameA
#define Everything_GetResultPath Everything_GetResultPathA
#define Everything_GetResultFullPathName Everything_GetResultFullPathNameA
#define Everything_GetResultExtension Everything_GetResultExtensionA
#define Everything_GetResultFileListFileName Everything_GetResultFileListFileNameA
#define Everything_GetResultHighlightedFileName Everything_GetResultHighlightedFileNameA
#define Everything_GetResultHighlightedPath Everything_GetResultHighlightedPathA
#define Everything_GetResultHighlightedFullPathAndFileName Everything_GetResultHighlightedFullPathAndFileNameA
#define Everything_GetRunCountFromFileName Everything_GetRunCountFromFileNameA
#define Everything_SetRunCountFromFileName Everything_SetRunCountFromFileNameA
#define Everything_IncRunCountFromFileName Everything_IncRunCountFromFileNameA
#endif
#ifdef __cplusplus
}
#endif
#endif

Binary file not shown.

Binary file not shown.

147
lib/third_party/dear-imgui/imconfig.h vendored Normal file
View File

@ -0,0 +1,147 @@
//-----------------------------------------------------------------------------
// DEAR IMGUI COMPILE-TIME OPTIONS
// Runtime options (clipboard callbacks, enabling various features, etc.) can generally be set via the ImGuiIO structure.
// You can use ImGui::SetAllocatorFunctions() before calling ImGui::CreateContext() to rewire memory allocation functions.
//-----------------------------------------------------------------------------
// A) You may edit imconfig.h (and not overwrite it when updating Dear ImGui, or maintain a patch/rebased branch with your modifications to it)
// B) or '#define IMGUI_USER_CONFIG "my_imgui_config.h"' in your project and then add directives in your own file without touching this template.
//-----------------------------------------------------------------------------
// You need to make sure that configuration settings are defined consistently _everywhere_ Dear ImGui is used, which include the imgui*.cpp
// files but also _any_ of your code that uses Dear ImGui. This is because some compile-time options have an affect on data structures.
// Defining those options in imconfig.h will ensure every compilation unit gets to see the same data structure layouts.
// Call IMGUI_CHECKVERSION() from your .cpp file to verify that the data structures your files are using are matching the ones imgui.cpp is using.
//-----------------------------------------------------------------------------
#pragma once
//---- Define assertion handler. Defaults to calling assert().
// - If your macro uses multiple statements, make sure is enclosed in a 'do { .. } while (0)' block so it can be used as a single statement.
// - Compiling with NDEBUG will usually strip out assert() to nothing, which is NOT recommended because we use asserts to notify of programmer mistakes.
//#define IM_ASSERT(_EXPR) MyAssert(_EXPR)
//#define IM_ASSERT(_EXPR) ((void)(_EXPR)) // Disable asserts
//---- Define attributes of all API symbols declarations, e.g. for DLL under Windows
// Using Dear ImGui via a shared library is not recommended, because of function call overhead and because we don't guarantee backward nor forward ABI compatibility.
// - Windows DLL users: heaps and globals are not shared across DLL boundaries! You will need to call SetCurrentContext() + SetAllocatorFunctions()
// for each static/DLL boundary you are calling from. Read "Context and Memory Allocators" section of imgui.cpp for more details.
//#define IMGUI_API __declspec(dllexport) // MSVC Windows: DLL export
//#define IMGUI_API __declspec(dllimport) // MSVC Windows: DLL import
//#define IMGUI_API __attribute__((visibility("default"))) // GCC/Clang: override visibility when set is hidden
//---- Don't define obsolete functions/enums/behaviors. Consider enabling from time to time after updating to clean your code of obsolete function/names.
//#define IMGUI_DISABLE_OBSOLETE_FUNCTIONS
//---- Disable all of Dear ImGui or don't implement standard windows/tools.
// It is very strongly recommended to NOT disable the demo windows and debug tool during development. They are extremely useful in day to day work. Please read comments in imgui_demo.cpp.
//#define IMGUI_DISABLE // Disable everything: all headers and source files will be empty.
//#define IMGUI_DISABLE_DEMO_WINDOWS // Disable demo windows: ShowDemoWindow()/ShowStyleEditor() will be empty.
//#define IMGUI_DISABLE_DEBUG_TOOLS // Disable metrics/debugger and other debug tools: ShowMetricsWindow(), ShowDebugLogWindow() and ShowIDStackToolWindow() will be empty.
//---- Don't implement some functions to reduce linkage requirements.
//#define IMGUI_DISABLE_WIN32_DEFAULT_CLIPBOARD_FUNCTIONS // [Win32] Don't implement default clipboard handler. Won't use and link with OpenClipboard/GetClipboardData/CloseClipboard etc. (user32.lib/.a, kernel32.lib/.a)
//#define IMGUI_ENABLE_WIN32_DEFAULT_IME_FUNCTIONS // [Win32] [Default with Visual Studio] Implement default IME handler (require imm32.lib/.a, auto-link for Visual Studio, -limm32 on command-line for MinGW)
//#define IMGUI_DISABLE_WIN32_DEFAULT_IME_FUNCTIONS // [Win32] [Default with non-Visual Studio compilers] Don't implement default IME handler (won't require imm32.lib/.a)
//#define IMGUI_DISABLE_WIN32_FUNCTIONS // [Win32] Won't use and link with any Win32 function (clipboard, IME).
//#define IMGUI_ENABLE_OSX_DEFAULT_CLIPBOARD_FUNCTIONS // [OSX] Implement default OSX clipboard handler (need to link with '-framework ApplicationServices', this is why this is not the default).
//#define IMGUI_DISABLE_DEFAULT_SHELL_FUNCTIONS // Don't implement default platform_io.Platform_OpenInShellFn() handler (Win32: ShellExecute(), require shell32.lib/.a, Mac/Linux: use system("")).
//#define IMGUI_DISABLE_DEFAULT_FORMAT_FUNCTIONS // Don't implement ImFormatString/ImFormatStringV so you can implement them yourself (e.g. if you don't want to link with vsnprintf)
//#define IMGUI_DISABLE_DEFAULT_MATH_FUNCTIONS // Don't implement ImFabs/ImSqrt/ImPow/ImFmod/ImCos/ImSin/ImAcos/ImAtan2 so you can implement them yourself.
//#define IMGUI_DISABLE_FILE_FUNCTIONS // Don't implement ImFileOpen/ImFileClose/ImFileRead/ImFileWrite and ImFileHandle at all (replace them with dummies)
//#define IMGUI_DISABLE_DEFAULT_FILE_FUNCTIONS // Don't implement ImFileOpen/ImFileClose/ImFileRead/ImFileWrite and ImFileHandle so you can implement them yourself if you don't want to link with fopen/fclose/fread/fwrite. This will also disable the LogToTTY() function.
//#define IMGUI_DISABLE_DEFAULT_ALLOCATORS // Don't implement default allocators calling malloc()/free() to avoid linking with them. You will need to call ImGui::SetAllocatorFunctions().
//#define IMGUI_DISABLE_DEFAULT_FONT // Disable default embedded font (ProggyClean.ttf), remove ~9.5 KB from output binary. AddFontDefault() will assert.
//#define IMGUI_DISABLE_SSE // Disable use of SSE intrinsics even if available
//---- Enable Test Engine / Automation features.
//#define IMGUI_ENABLE_TEST_ENGINE // Enable imgui_test_engine hooks. Generally set automatically by include "imgui_te_config.h", see Test Engine for details.
//---- Include imgui_user.h at the end of imgui.h as a convenience
// May be convenient for some users to only explicitly include vanilla imgui.h and have extra stuff included.
//#define IMGUI_INCLUDE_IMGUI_USER_H
//#define IMGUI_USER_H_FILENAME "my_folder/my_imgui_user.h"
//---- Pack vertex colors as BGRA8 instead of RGBA8 (to avoid converting from one to another). Need dedicated backend support.
//#define IMGUI_USE_BGRA_PACKED_COLOR
//---- Use legacy CRC32-adler tables (used before 1.91.6), in order to preserve old .ini data that you cannot afford to invalidate.
//#define IMGUI_USE_LEGACY_CRC32_ADLER
//---- Use 32-bit for ImWchar (default is 16-bit) to support Unicode planes 1-16. (e.g. point beyond 0xFFFF like emoticons, dingbats, symbols, shapes, ancient languages, etc...)
//#define IMGUI_USE_WCHAR32
//---- Avoid multiple STB libraries implementations, or redefine path/filenames to prioritize another version
// By default the embedded implementations are declared static and not available outside of Dear ImGui sources files.
//#define IMGUI_STB_TRUETYPE_FILENAME "my_folder/stb_truetype.h"
//#define IMGUI_STB_RECT_PACK_FILENAME "my_folder/stb_rect_pack.h"
//#define IMGUI_STB_SPRINTF_FILENAME "my_folder/stb_sprintf.h" // only used if IMGUI_USE_STB_SPRINTF is defined.
//#define IMGUI_DISABLE_STB_TRUETYPE_IMPLEMENTATION
//#define IMGUI_DISABLE_STB_RECT_PACK_IMPLEMENTATION
//#define IMGUI_DISABLE_STB_SPRINTF_IMPLEMENTATION // only disabled if IMGUI_USE_STB_SPRINTF is defined.
//---- Use stb_sprintf.h for a faster implementation of vsnprintf instead of the one from libc (unless IMGUI_DISABLE_DEFAULT_FORMAT_FUNCTIONS is defined)
// Compatibility checks of arguments and formats done by clang and GCC will be disabled in order to support the extra formats provided by stb_sprintf.h.
//#define IMGUI_USE_STB_SPRINTF
//---- Use FreeType to build and rasterize the font atlas (instead of stb_truetype which is embedded by default in Dear ImGui)
// Requires FreeType headers to be available in the include path. Requires program to be compiled with 'misc/freetype/imgui_freetype.cpp' (in this repository) + the FreeType library (not provided).
// Note that imgui_freetype.cpp may be used _without_ this define, if you manually call ImFontAtlas::SetFontLoader(). The define is simply a convenience.
// On Windows you may use vcpkg with 'vcpkg install freetype --triplet=x64-windows' + 'vcpkg integrate install'.
//#define IMGUI_ENABLE_FREETYPE
//---- Use FreeType + plutosvg or lunasvg to render OpenType SVG fonts (SVGinOT)
// Only works in combination with IMGUI_ENABLE_FREETYPE.
// - plutosvg is currently easier to install, as e.g. it is part of vcpkg. It will support more fonts and may load them faster. See misc/freetype/README for instructions.
// - Both require headers to be available in the include path + program to be linked with the library code (not provided).
// - (note: lunasvg implementation is based on Freetype's rsvg-port.c which is licensed under CeCILL-C Free Software License Agreement)
//#define IMGUI_ENABLE_FREETYPE_PLUTOSVG
//#define IMGUI_ENABLE_FREETYPE_LUNASVG
//---- Use stb_truetype to build and rasterize the font atlas (default)
// The only purpose of this define is if you want force compilation of the stb_truetype backend ALONG with the FreeType backend.
//#define IMGUI_ENABLE_STB_TRUETYPE
//---- Define constructor and implicit cast operators to convert back<>forth between your math types and ImVec2/ImVec4.
// This will be inlined as part of ImVec2 and ImVec4 class declarations.
/*
#define IM_VEC2_CLASS_EXTRA \
constexpr ImVec2(const MyVec2& f) : x(f.x), y(f.y) {} \
operator MyVec2() const { return MyVec2(x,y); }
#define IM_VEC4_CLASS_EXTRA \
constexpr ImVec4(const MyVec4& f) : x(f.x), y(f.y), z(f.z), w(f.w) {} \
operator MyVec4() const { return MyVec4(x,y,z,w); }
*/
//---- ...Or use Dear ImGui's own very basic math operators.
//#define IMGUI_DEFINE_MATH_OPERATORS
//---- Use 32-bit vertex indices (default is 16-bit) is one way to allow large meshes with more than 64K vertices.
// Your renderer backend will need to support it (most example renderer backends support both 16/32-bit indices).
// Another way to allow large meshes while keeping 16-bit indices is to handle ImDrawCmd::VtxOffset in your renderer.
// Read about ImGuiBackendFlags_RendererHasVtxOffset for details.
//#define ImDrawIdx unsigned int
//---- Override ImDrawCallback signature (will need to modify renderer backends accordingly)
//struct ImDrawList;
//struct ImDrawCmd;
//typedef void (*MyImDrawCallback)(const ImDrawList* draw_list, const ImDrawCmd* cmd, void* my_renderer_user_data);
//#define ImDrawCallback MyImDrawCallback
//---- Debug Tools: Macro to break in Debugger (we provide a default implementation of this in the codebase)
// (use 'Metrics->Tools->Item Picker' to pick widgets with the mouse and break into them for easy debugging.)
//#define IM_DEBUG_BREAK IM_ASSERT(0)
//#define IM_DEBUG_BREAK __debugbreak()
//---- Debug Tools: Enable highlight ID conflicts _before_ hovering items. When io.ConfigDebugHighlightIdConflicts is set.
// (THIS WILL SLOW DOWN DEAR IMGUI. Only use occasionally and disable after use)
//#define IMGUI_DEBUG_HIGHLIGHT_ALL_ID_CONFLICTS
//---- Debug Tools: Enable slower asserts
//#define IMGUI_DEBUG_PARANOID
//---- Tip: You can add extra functions within the ImGui:: namespace from anywhere (e.g. your own sources/header files)
/*
namespace ImGui
{
void MyFunction(const char* name, MyMatrix44* mtx);
}
*/

24007
lib/third_party/dear-imgui/imgui.cpp vendored Normal file

File diff suppressed because it is too large Load Diff

4445
lib/third_party/dear-imgui/imgui.h vendored Normal file

File diff suppressed because it is too large Load Diff

11196
lib/third_party/dear-imgui/imgui_demo.cpp vendored Normal file

File diff suppressed because it is too large Load Diff

6371
lib/third_party/dear-imgui/imgui_draw.cpp vendored Normal file

File diff suppressed because it is too large Load Diff

View File

@ -0,0 +1,837 @@
// dear imgui: Renderer Backend for DirectX11
// This needs to be used along with a Platform Backend (e.g. Win32)
// Implemented features:
// [X] Renderer: User texture binding. Use 'ID3D11ShaderResourceView*' as texture identifier. Read the FAQ about ImTextureID/ImTextureRef!
// [X] Renderer: Large meshes support (64k+ vertices) even with 16-bit indices (ImGuiBackendFlags_RendererHasVtxOffset).
// [X] Renderer: Texture updates support for dynamic font atlas (ImGuiBackendFlags_RendererHasTextures).
// [X] Renderer: Expose selected render state for draw callbacks to use. Access in '(ImGui_ImplXXXX_RenderState*)GetPlatformIO().Renderer_RenderState'.
// [X] Renderer: Multi-viewport support (multiple windows). Enable with 'io.ConfigFlags |= ImGuiConfigFlags_ViewportsEnable'.
// You can use unmodified imgui_impl_* files in your project. See examples/ folder for examples of using this.
// Prefer including the entire imgui/ repository into your project (either as a copy or as a submodule), and only build the backends you need.
// Learn about Dear ImGui:
// - FAQ https://dearimgui.com/faq
// - Getting Started https://dearimgui.com/getting-started
// - Documentation https://dearimgui.com/docs (same as your local docs/ folder).
// - Introduction, links and more at the top of imgui.cpp
// CHANGELOG
// (minor and older changes stripped away, please see git history for details)
// 2025-XX-XX: Platform: Added support for multiple windows via the ImGuiPlatformIO interface.
// 2025-09-18: Call platform_io.ClearRendererHandlers() on shutdown.
// 2025-06-11: DirectX11: Added support for ImGuiBackendFlags_RendererHasTextures, for dynamic font atlas.
// 2025-05-07: DirectX11: Honor draw_data->FramebufferScale to allow for custom backends and experiment using it (consistently with other renderer backends, even though in normal condition it is not set under Windows).
// 2025-02-24: [Docking] Added undocumented ImGui_ImplDX11_SetSwapChainDescs() to configure swap chain creation for secondary viewports.
// 2025-01-06: DirectX11: Expose VertexConstantBuffer in ImGui_ImplDX11_RenderState. Reset projection matrix in ImDrawCallback_ResetRenderState handler.
// 2024-10-07: DirectX11: Changed default texture sampler to Clamp instead of Repeat/Wrap.
// 2024-10-07: DirectX11: Expose selected render state in ImGui_ImplDX11_RenderState, which you can access in 'void* platform_io.Renderer_RenderState' during draw callbacks.
// 2022-10-11: Using 'nullptr' instead of 'NULL' as per our switch to C++11.
// 2021-06-29: Reorganized backend to pull data from a single structure to facilitate usage with multiple-contexts (all g_XXXX access changed to bd->XXXX).
// 2021-05-19: DirectX11: Replaced direct access to ImDrawCmd::TextureId with a call to ImDrawCmd::GetTexID(). (will become a requirement)
// 2021-02-18: DirectX11: Change blending equation to preserve alpha in output buffer.
// 2019-08-01: DirectX11: Fixed code querying the Geometry Shader state (would generally error with Debug layer enabled).
// 2019-07-21: DirectX11: Backup, clear and restore Geometry Shader is any is bound when calling ImGui_ImplDX11_RenderDrawData. Clearing Hull/Domain/Compute shaders without backup/restore.
// 2019-05-29: DirectX11: Added support for large mesh (64K+ vertices), enable ImGuiBackendFlags_RendererHasVtxOffset flag.
// 2019-04-30: DirectX11: Added support for special ImDrawCallback_ResetRenderState callback to reset render state.
// 2018-12-03: Misc: Added #pragma comment statement to automatically link with d3dcompiler.lib when using D3DCompile().
// 2018-11-30: Misc: Setting up io.BackendRendererName so it can be displayed in the About Window.
// 2018-08-01: DirectX11: Querying for IDXGIFactory instead of IDXGIFactory1 to increase compatibility.
// 2018-07-13: DirectX11: Fixed unreleased resources in Init and Shutdown functions.
// 2018-06-08: Misc: Extracted imgui_impl_dx11.cpp/.h away from the old combined DX11+Win32 example.
// 2018-06-08: DirectX11: Use draw_data->DisplayPos and draw_data->DisplaySize to setup projection matrix and clipping rectangle.
// 2018-02-16: Misc: Obsoleted the io.RenderDrawListsFn callback and exposed ImGui_ImplDX11_RenderDrawData() in the .h file so you can call it yourself.
// 2018-02-06: Misc: Removed call to ImGui::Shutdown() which is not available from 1.60 WIP, user needs to call CreateContext/DestroyContext themselves.
// 2016-05-07: DirectX11: Disabling depth-write.
#include "imgui.h"
#ifndef IMGUI_DISABLE
#include "imgui_impl_dx11.h"
// DirectX
#include <stdio.h>
#include <d3d11.h>
#include <d3dcompiler.h>
#ifdef _MSC_VER
#pragma comment(lib, "d3dcompiler") // Automatically link with d3dcompiler.lib as we are using D3DCompile() below.
#endif
// Clang/GCC warnings with -Weverything
#if defined(__clang__)
#pragma clang diagnostic ignored "-Wold-style-cast" // warning: use of old-style cast // yes, they are more terse.
#pragma clang diagnostic ignored "-Wsign-conversion" // warning: implicit conversion changes signedness
#endif
// DirectX11 data
// Backend-owned GPU objects for one ImTextureData. Allocated in ImGui_ImplDX11_UpdateTexture()
// (WantCreate path), stored in tex->BackendUserData, and released in ImGui_ImplDX11_DestroyTexture().
struct ImGui_ImplDX11_Texture
{
ID3D11Texture2D* pTexture; // Owned texture resource (Release()'d on destroy)
ID3D11ShaderResourceView* pTextureView; // Owned SRV; its pointer value doubles as the ImTextureID handle
};
// Per-context backend state, stored in io.BackendRendererUserData (see ImGui_ImplDX11_GetBackendData()).
struct ImGui_ImplDX11_Data
{
ID3D11Device* pd3dDevice;
ID3D11DeviceContext* pd3dDeviceContext;
IDXGIFactory* pFactory;
ID3D11Buffer* pVB; // Dynamic vertex buffer, grown on demand in RenderDrawData()
ID3D11Buffer* pIB; // Dynamic index buffer, grown on demand in RenderDrawData()
ID3D11VertexShader* pVertexShader;
ID3D11InputLayout* pInputLayout;
ID3D11Buffer* pVertexConstantBuffer; // Holds VERTEX_CONSTANT_BUFFER_DX11 (orthographic projection matrix)
ID3D11PixelShader* pPixelShader;
ID3D11SamplerState* pTexSamplerLinear;
ID3D11RasterizerState* pRasterizerState;
ID3D11BlendState* pBlendState;
ID3D11DepthStencilState* pDepthStencilState;
int VertexBufferSize; // Current capacity (in vertices) of pVB
int IndexBufferSize; // Current capacity (in indices) of pIB
ImVector<DXGI_SWAP_CHAIN_DESC> SwapChainDescsForViewports;
// Zero-initialize every member, then set initial vertex/index buffer capacities.
ImGui_ImplDX11_Data() { memset((void*)this, 0, sizeof(*this)); VertexBufferSize = 5000; IndexBufferSize = 10000; }
};
// CPU-side layout of the vertex-shader constant buffer: a single 4x4 projection
// matrix, written each frame in ImGui_ImplDX11_SetupRenderState().
struct VERTEX_CONSTANT_BUFFER_DX11
{
float mvp[4][4];
};
// Backend data stored in io.BackendRendererUserData to allow support for multiple Dear ImGui contexts
// It is STRONGLY preferred that you use docking branch with multi-viewports (== single Dear ImGui context + multiple windows) instead of multiple Dear ImGui contexts.
// Fetch the backend data for the current Dear ImGui context.
// Returns nullptr when no context exists (or the backend was not initialized).
static ImGui_ImplDX11_Data* ImGui_ImplDX11_GetBackendData()
{
if (ImGui::GetCurrentContext() == nullptr)
return nullptr;
return (ImGui_ImplDX11_Data*)ImGui::GetIO().BackendRendererUserData;
}
// Forward Declarations
static void ImGui_ImplDX11_InitMultiViewportSupport();
static void ImGui_ImplDX11_ShutdownMultiViewportSupport();
// Functions
// Bind all pipeline state needed to render Dear ImGui draw data on 'device_ctx':
// viewport, projection constant buffer, input layout, vertex/index buffers, shaders,
// sampler, and blend/depth/rasterizer state. Called at the start of RenderDrawData()
// and again when a draw command uses the special ImDrawCallback_ResetRenderState value.
static void ImGui_ImplDX11_SetupRenderState(const ImDrawData* draw_data, ID3D11DeviceContext* device_ctx)
{
ImGui_ImplDX11_Data* bd = ImGui_ImplDX11_GetBackendData();
// Setup viewport
D3D11_VIEWPORT vp = {};
vp.Width = draw_data->DisplaySize.x * draw_data->FramebufferScale.x;
vp.Height = draw_data->DisplaySize.y * draw_data->FramebufferScale.y;
vp.MinDepth = 0.0f;
vp.MaxDepth = 1.0f;
vp.TopLeftX = vp.TopLeftY = 0;
device_ctx->RSSetViewports(1, &vp);
// Setup orthographic projection matrix into our constant buffer
// Our visible imgui space lies from draw_data->DisplayPos (top left) to draw_data->DisplayPos+data_data->DisplaySize (bottom right). DisplayPos is (0,0) for single viewport apps.
D3D11_MAPPED_SUBRESOURCE mapped_resource;
if (device_ctx->Map(bd->pVertexConstantBuffer, 0, D3D11_MAP_WRITE_DISCARD, 0, &mapped_resource) == S_OK)
{
VERTEX_CONSTANT_BUFFER_DX11* constant_buffer = (VERTEX_CONSTANT_BUFFER_DX11*)mapped_resource.pData;
float L = draw_data->DisplayPos.x;
float R = draw_data->DisplayPos.x + draw_data->DisplaySize.x;
float T = draw_data->DisplayPos.y;
float B = draw_data->DisplayPos.y + draw_data->DisplaySize.y;
float mvp[4][4] =
{
{ 2.0f/(R-L), 0.0f, 0.0f, 0.0f },
{ 0.0f, 2.0f/(T-B), 0.0f, 0.0f },
{ 0.0f, 0.0f, 0.5f, 0.0f },
{ (R+L)/(L-R), (T+B)/(B-T), 0.5f, 1.0f },
};
memcpy(&constant_buffer->mvp, mvp, sizeof(mvp));
device_ctx->Unmap(bd->pVertexConstantBuffer, 0);
}
// Setup shader and vertex buffers
unsigned int stride = sizeof(ImDrawVert);
unsigned int offset = 0;
device_ctx->IASetInputLayout(bd->pInputLayout);
device_ctx->IASetVertexBuffers(0, 1, &bd->pVB, &stride, &offset);
// ImDrawIdx may be 16- or 32-bit depending on imconfig.h ('#define ImDrawIdx unsigned int').
device_ctx->IASetIndexBuffer(bd->pIB, sizeof(ImDrawIdx) == 2 ? DXGI_FORMAT_R16_UINT : DXGI_FORMAT_R32_UINT, 0);
device_ctx->IASetPrimitiveTopology(D3D11_PRIMITIVE_TOPOLOGY_TRIANGLELIST);
device_ctx->VSSetShader(bd->pVertexShader, nullptr, 0);
device_ctx->VSSetConstantBuffers(0, 1, &bd->pVertexConstantBuffer);
device_ctx->PSSetShader(bd->pPixelShader, nullptr, 0);
device_ctx->PSSetSamplers(0, 1, &bd->pTexSamplerLinear);
device_ctx->GSSetShader(nullptr, nullptr, 0);
device_ctx->HSSetShader(nullptr, nullptr, 0); // In theory we should backup and restore this as well.. very infrequently used..
device_ctx->DSSetShader(nullptr, nullptr, 0); // In theory we should backup and restore this as well.. very infrequently used..
device_ctx->CSSetShader(nullptr, nullptr, 0); // In theory we should backup and restore this as well.. very infrequently used..
// Setup render state
const float blend_factor[4] = { 0.f, 0.f, 0.f, 0.f };
device_ctx->OMSetBlendState(bd->pBlendState, blend_factor, 0xffffffff);
device_ctx->OMSetDepthStencilState(bd->pDepthStencilState, 0);
device_ctx->RSSetState(bd->pRasterizerState);
}
// Render function
// Render Dear ImGui draw data with DirectX 11.
// - Processes pending texture create/update/destroy requests.
// - Uploads all command lists into a single dynamic vertex + index buffer pair.
// - Backs up the D3D11 state it touches, renders, then restores that state.
// Fix vs. original: if mapping the index buffer fails after the vertex buffer was
// successfully mapped, we now Unmap() the vertex buffer before returning instead of
// leaving it in a mapped state.
void ImGui_ImplDX11_RenderDrawData(ImDrawData* draw_data)
{
// Avoid rendering when minimized
if (draw_data->DisplaySize.x <= 0.0f || draw_data->DisplaySize.y <= 0.0f)
return;
ImGui_ImplDX11_Data* bd = ImGui_ImplDX11_GetBackendData();
ID3D11DeviceContext* device = bd->pd3dDeviceContext;
// Catch up with texture updates. Most of the times, the list will have 1 element with an OK status, aka nothing to do.
// (This almost always points to ImGui::GetPlatformIO().Textures[] but is part of ImDrawData to allow overriding or disabling texture updates).
if (draw_data->Textures != nullptr)
for (ImTextureData* tex : *draw_data->Textures)
if (tex->Status != ImTextureStatus_OK)
ImGui_ImplDX11_UpdateTexture(tex);
// Create and grow vertex/index buffers if needed
if (!bd->pVB || bd->VertexBufferSize < draw_data->TotalVtxCount)
{
if (bd->pVB) { bd->pVB->Release(); bd->pVB = nullptr; }
bd->VertexBufferSize = draw_data->TotalVtxCount + 5000; // Over-allocate to reduce realloc frequency
D3D11_BUFFER_DESC desc = {};
desc.Usage = D3D11_USAGE_DYNAMIC;
desc.ByteWidth = bd->VertexBufferSize * sizeof(ImDrawVert);
desc.BindFlags = D3D11_BIND_VERTEX_BUFFER;
desc.CPUAccessFlags = D3D11_CPU_ACCESS_WRITE;
desc.MiscFlags = 0;
if (bd->pd3dDevice->CreateBuffer(&desc, nullptr, &bd->pVB) < 0)
return;
}
if (!bd->pIB || bd->IndexBufferSize < draw_data->TotalIdxCount)
{
if (bd->pIB) { bd->pIB->Release(); bd->pIB = nullptr; }
bd->IndexBufferSize = draw_data->TotalIdxCount + 10000; // Over-allocate to reduce realloc frequency
D3D11_BUFFER_DESC desc = {};
desc.Usage = D3D11_USAGE_DYNAMIC;
desc.ByteWidth = bd->IndexBufferSize * sizeof(ImDrawIdx);
desc.BindFlags = D3D11_BIND_INDEX_BUFFER;
desc.CPUAccessFlags = D3D11_CPU_ACCESS_WRITE;
if (bd->pd3dDevice->CreateBuffer(&desc, nullptr, &bd->pIB) < 0)
return;
}
// Upload vertex/index data into a single contiguous GPU buffer
D3D11_MAPPED_SUBRESOURCE vtx_resource, idx_resource;
if (device->Map(bd->pVB, 0, D3D11_MAP_WRITE_DISCARD, 0, &vtx_resource) != S_OK)
return;
if (device->Map(bd->pIB, 0, D3D11_MAP_WRITE_DISCARD, 0, &idx_resource) != S_OK)
{
device->Unmap(bd->pVB, 0); // Don't leave the vertex buffer mapped on failure
return;
}
ImDrawVert* vtx_dst = (ImDrawVert*)vtx_resource.pData;
ImDrawIdx* idx_dst = (ImDrawIdx*)idx_resource.pData;
for (const ImDrawList* draw_list : draw_data->CmdLists)
{
memcpy(vtx_dst, draw_list->VtxBuffer.Data, draw_list->VtxBuffer.Size * sizeof(ImDrawVert));
memcpy(idx_dst, draw_list->IdxBuffer.Data, draw_list->IdxBuffer.Size * sizeof(ImDrawIdx));
vtx_dst += draw_list->VtxBuffer.Size;
idx_dst += draw_list->IdxBuffer.Size;
}
device->Unmap(bd->pVB, 0);
device->Unmap(bd->pIB, 0);
// Backup DX state that will be modified to restore it afterwards (unfortunately this is very ugly looking and verbose. Close your eyes!)
struct BACKUP_DX11_STATE
{
UINT ScissorRectsCount, ViewportsCount;
D3D11_RECT ScissorRects[D3D11_VIEWPORT_AND_SCISSORRECT_OBJECT_COUNT_PER_PIPELINE];
D3D11_VIEWPORT Viewports[D3D11_VIEWPORT_AND_SCISSORRECT_OBJECT_COUNT_PER_PIPELINE];
ID3D11RasterizerState* RS;
ID3D11BlendState* BlendState;
FLOAT BlendFactor[4];
UINT SampleMask;
UINT StencilRef;
ID3D11DepthStencilState* DepthStencilState;
ID3D11ShaderResourceView* PSShaderResource;
ID3D11SamplerState* PSSampler;
ID3D11PixelShader* PS;
ID3D11VertexShader* VS;
ID3D11GeometryShader* GS;
UINT PSInstancesCount, VSInstancesCount, GSInstancesCount;
ID3D11ClassInstance *PSInstances[256], *VSInstances[256], *GSInstances[256]; // 256 is max according to PSSetShader documentation
D3D11_PRIMITIVE_TOPOLOGY PrimitiveTopology;
ID3D11Buffer* IndexBuffer, *VertexBuffer, *VSConstantBuffer;
UINT IndexBufferOffset, VertexBufferStride, VertexBufferOffset;
DXGI_FORMAT IndexBufferFormat;
ID3D11InputLayout* InputLayout;
};
BACKUP_DX11_STATE old = {};
old.ScissorRectsCount = old.ViewportsCount = D3D11_VIEWPORT_AND_SCISSORRECT_OBJECT_COUNT_PER_PIPELINE;
device->RSGetScissorRects(&old.ScissorRectsCount, old.ScissorRects);
device->RSGetViewports(&old.ViewportsCount, old.Viewports);
device->RSGetState(&old.RS);
device->OMGetBlendState(&old.BlendState, old.BlendFactor, &old.SampleMask);
device->OMGetDepthStencilState(&old.DepthStencilState, &old.StencilRef);
device->PSGetShaderResources(0, 1, &old.PSShaderResource);
device->PSGetSamplers(0, 1, &old.PSSampler);
old.PSInstancesCount = old.VSInstancesCount = old.GSInstancesCount = 256;
device->PSGetShader(&old.PS, old.PSInstances, &old.PSInstancesCount);
device->VSGetShader(&old.VS, old.VSInstances, &old.VSInstancesCount);
device->VSGetConstantBuffers(0, 1, &old.VSConstantBuffer);
device->GSGetShader(&old.GS, old.GSInstances, &old.GSInstancesCount);
device->IAGetPrimitiveTopology(&old.PrimitiveTopology);
device->IAGetIndexBuffer(&old.IndexBuffer, &old.IndexBufferFormat, &old.IndexBufferOffset);
device->IAGetVertexBuffers(0, 1, &old.VertexBuffer, &old.VertexBufferStride, &old.VertexBufferOffset);
device->IAGetInputLayout(&old.InputLayout);
// Setup desired DX state
ImGui_ImplDX11_SetupRenderState(draw_data, device);
// Setup render state structure (for callbacks and custom texture bindings)
ImGuiPlatformIO& platform_io = ImGui::GetPlatformIO();
ImGui_ImplDX11_RenderState render_state;
render_state.Device = bd->pd3dDevice;
render_state.DeviceContext = bd->pd3dDeviceContext;
render_state.SamplerDefault = bd->pTexSamplerLinear;
render_state.VertexConstantBuffer = bd->pVertexConstantBuffer;
platform_io.Renderer_RenderState = &render_state;
// Render command lists
// (Because we merged all buffers into a single one, we maintain our own offset into them)
int global_idx_offset = 0;
int global_vtx_offset = 0;
ImVec2 clip_off = draw_data->DisplayPos;
ImVec2 clip_scale = draw_data->FramebufferScale;
for (const ImDrawList* draw_list : draw_data->CmdLists)
{
for (int cmd_i = 0; cmd_i < draw_list->CmdBuffer.Size; cmd_i++)
{
const ImDrawCmd* pcmd = &draw_list->CmdBuffer[cmd_i];
if (pcmd->UserCallback != nullptr)
{
// User callback, registered via ImDrawList::AddCallback()
// (ImDrawCallback_ResetRenderState is a special callback value used by the user to request the renderer to reset render state.)
if (pcmd->UserCallback == ImDrawCallback_ResetRenderState)
ImGui_ImplDX11_SetupRenderState(draw_data, device);
else
pcmd->UserCallback(draw_list, pcmd);
}
else
{
// Project scissor/clipping rectangles into framebuffer space
ImVec2 clip_min((pcmd->ClipRect.x - clip_off.x) * clip_scale.x, (pcmd->ClipRect.y - clip_off.y) * clip_scale.y);
ImVec2 clip_max((pcmd->ClipRect.z - clip_off.x) * clip_scale.x, (pcmd->ClipRect.w - clip_off.y) * clip_scale.y);
if (clip_max.x <= clip_min.x || clip_max.y <= clip_min.y)
continue;
// Apply scissor/clipping rectangle
const D3D11_RECT r = { (LONG)clip_min.x, (LONG)clip_min.y, (LONG)clip_max.x, (LONG)clip_max.y };
device->RSSetScissorRects(1, &r);
// Bind texture, Draw
ID3D11ShaderResourceView* texture_srv = (ID3D11ShaderResourceView*)pcmd->GetTexID();
device->PSSetShaderResources(0, 1, &texture_srv);
device->DrawIndexed(pcmd->ElemCount, pcmd->IdxOffset + global_idx_offset, pcmd->VtxOffset + global_vtx_offset);
}
}
global_idx_offset += draw_list->IdxBuffer.Size;
global_vtx_offset += draw_list->VtxBuffer.Size;
}
platform_io.Renderer_RenderState = nullptr;
// Restore modified DX state
device->RSSetScissorRects(old.ScissorRectsCount, old.ScissorRects);
device->RSSetViewports(old.ViewportsCount, old.Viewports);
device->RSSetState(old.RS); if (old.RS) old.RS->Release();
device->OMSetBlendState(old.BlendState, old.BlendFactor, old.SampleMask); if (old.BlendState) old.BlendState->Release();
device->OMSetDepthStencilState(old.DepthStencilState, old.StencilRef); if (old.DepthStencilState) old.DepthStencilState->Release();
device->PSSetShaderResources(0, 1, &old.PSShaderResource); if (old.PSShaderResource) old.PSShaderResource->Release();
device->PSSetSamplers(0, 1, &old.PSSampler); if (old.PSSampler) old.PSSampler->Release();
device->PSSetShader(old.PS, old.PSInstances, old.PSInstancesCount); if (old.PS) old.PS->Release();
for (UINT i = 0; i < old.PSInstancesCount; i++) if (old.PSInstances[i]) old.PSInstances[i]->Release();
device->VSSetShader(old.VS, old.VSInstances, old.VSInstancesCount); if (old.VS) old.VS->Release();
device->VSSetConstantBuffers(0, 1, &old.VSConstantBuffer); if (old.VSConstantBuffer) old.VSConstantBuffer->Release();
device->GSSetShader(old.GS, old.GSInstances, old.GSInstancesCount); if (old.GS) old.GS->Release();
for (UINT i = 0; i < old.VSInstancesCount; i++) if (old.VSInstances[i]) old.VSInstances[i]->Release();
device->IASetPrimitiveTopology(old.PrimitiveTopology);
device->IASetIndexBuffer(old.IndexBuffer, old.IndexBufferFormat, old.IndexBufferOffset); if (old.IndexBuffer) old.IndexBuffer->Release();
device->IASetVertexBuffers(0, 1, &old.VertexBuffer, &old.VertexBufferStride, &old.VertexBufferOffset); if (old.VertexBuffer) old.VertexBuffer->Release();
device->IASetInputLayout(old.InputLayout); if (old.InputLayout) old.InputLayout->Release();
}
// Release the GPU texture + SRV owned by 'tex' (if any) and mark the texture destroyed.
// Safe to call on a texture that was never created (BackendUserData == nullptr).
static void ImGui_ImplDX11_DestroyTexture(ImTextureData* tex)
{
ImGui_ImplDX11_Texture* tex_data = (ImGui_ImplDX11_Texture*)tex->BackendUserData;
if (tex_data != nullptr)
{
IM_ASSERT(tex_data->pTextureView == (ID3D11ShaderResourceView*)(intptr_t)tex->TexID);
tex_data->pTextureView->Release();
tex_data->pTexture->Release();
IM_DELETE(tex_data);
// Clear identifiers and mark as destroyed (in order to allow e.g. calling InvalidateDeviceObjects while running)
tex->SetTexID(ImTextureID_Invalid);
tex->BackendUserData = nullptr;
}
tex->SetStatus(ImTextureStatus_Destroyed);
}
// Process one texture request from Dear ImGui's texture system:
// - WantCreate:  allocate an ID3D11Texture2D + SRV from tex pixel data and publish the SRV as the ImTextureID.
// - WantUpdates: upload the dirty rectangles listed in tex->Updates via UpdateSubresource().
// - WantDestroy: release GPU objects once the texture has been unused for at least one frame.
// (Also fixes a garbled member access 'r .h' -> 'r.h' in the update-box computation.)
void ImGui_ImplDX11_UpdateTexture(ImTextureData* tex)
{
ImGui_ImplDX11_Data* bd = ImGui_ImplDX11_GetBackendData();
if (tex->Status == ImTextureStatus_WantCreate)
{
// Create and upload new texture to graphics system
//IMGUI_DEBUG_LOG("UpdateTexture #%03d: WantCreate %dx%d\n", tex->UniqueID, tex->Width, tex->Height);
IM_ASSERT(tex->TexID == ImTextureID_Invalid && tex->BackendUserData == nullptr);
IM_ASSERT(tex->Format == ImTextureFormat_RGBA32);
unsigned int* pixels = (unsigned int*)tex->GetPixels();
ImGui_ImplDX11_Texture* backend_tex = IM_NEW(ImGui_ImplDX11_Texture)();
// Create texture
D3D11_TEXTURE2D_DESC desc;
ZeroMemory(&desc, sizeof(desc));
desc.Width = (UINT)tex->Width;
desc.Height = (UINT)tex->Height;
desc.MipLevels = 1;
desc.ArraySize = 1;
desc.Format = DXGI_FORMAT_R8G8B8A8_UNORM;
desc.SampleDesc.Count = 1;
desc.Usage = D3D11_USAGE_DEFAULT;
desc.BindFlags = D3D11_BIND_SHADER_RESOURCE;
desc.CPUAccessFlags = 0;
D3D11_SUBRESOURCE_DATA subResource;
subResource.pSysMem = pixels;
subResource.SysMemPitch = desc.Width * 4; // 4 bytes per RGBA32 pixel
subResource.SysMemSlicePitch = 0;
bd->pd3dDevice->CreateTexture2D(&desc, &subResource, &backend_tex->pTexture);
IM_ASSERT(backend_tex->pTexture != nullptr && "Backend failed to create texture!");
// Create texture view
D3D11_SHADER_RESOURCE_VIEW_DESC srvDesc;
ZeroMemory(&srvDesc, sizeof(srvDesc));
srvDesc.Format = DXGI_FORMAT_R8G8B8A8_UNORM;
srvDesc.ViewDimension = D3D11_SRV_DIMENSION_TEXTURE2D;
srvDesc.Texture2D.MipLevels = desc.MipLevels;
srvDesc.Texture2D.MostDetailedMip = 0;
bd->pd3dDevice->CreateShaderResourceView(backend_tex->pTexture, &srvDesc, &backend_tex->pTextureView);
IM_ASSERT(backend_tex->pTextureView != nullptr && "Backend failed to create texture!");
// Store identifiers
tex->SetTexID((ImTextureID)(intptr_t)backend_tex->pTextureView);
tex->SetStatus(ImTextureStatus_OK);
tex->BackendUserData = backend_tex;
}
else if (tex->Status == ImTextureStatus_WantUpdates)
{
// Update selected blocks. We only ever write to textures regions which have never been used before!
// This backend choose to use tex->Updates[] but you can use tex->UpdateRect to upload a single region.
ImGui_ImplDX11_Texture* backend_tex = (ImGui_ImplDX11_Texture*)tex->BackendUserData;
IM_ASSERT(backend_tex->pTextureView == (ID3D11ShaderResourceView*)(intptr_t)tex->TexID);
for (ImTextureRect& r : tex->Updates)
{
D3D11_BOX box = { (UINT)r.x, (UINT)r.y, (UINT)0, (UINT)(r.x + r.w), (UINT)(r.y + r.h), (UINT)1 };
bd->pd3dDeviceContext->UpdateSubresource(backend_tex->pTexture, 0, &box, tex->GetPixelsAt(r.x, r.y), (UINT)tex->GetPitch(), 0);
}
tex->SetStatus(ImTextureStatus_OK);
}
// Deferred destroy: wait until the texture has not been used for at least one frame.
if (tex->Status == ImTextureStatus_WantDestroy && tex->UnusedFrames > 0)
ImGui_ImplDX11_DestroyTexture(tex);
}
// Create all device-dependent objects: vertex/pixel shaders, input layout,
// projection constant buffer, blend/rasterizer/depth-stencil states and the
// font/texture sampler. Existing objects are destroyed first, so this is safe
// to call again (e.g. after a device reset). Returns false on failure.
bool ImGui_ImplDX11_CreateDeviceObjects()
{
    ImGui_ImplDX11_Data* bd = ImGui_ImplDX11_GetBackendData();
    if (!bd->pd3dDevice)
        return false;
    ImGui_ImplDX11_InvalidateDeviceObjects();

    // By using D3DCompile() from <d3dcompiler.h> / d3dcompiler.lib, we introduce a dependency to a given version of d3dcompiler_XX.dll (see D3DCOMPILER_DLL_A)
    // If you would like to use this DX11 sample code but remove this dependency you can:
    //  1) compile once, save the compiled shader blobs into a file or source code and pass them to CreateVertexShader()/CreatePixelShader() [preferred solution]
    //  2) use code to detect any version of the DLL and grab a pointer to D3DCompile from the DLL.
    // See https://github.com/ocornut/imgui/pull/638 for sources and details.

    // Create the vertex shader
    {
        static const char* vertexShader =
            "cbuffer vertexBuffer : register(b0) \
            {\
            float4x4 ProjectionMatrix; \
            };\
            struct VS_INPUT\
            {\
            float2 pos : POSITION;\
            float4 col : COLOR0;\
            float2 uv  : TEXCOORD0;\
            };\
            \
            struct PS_INPUT\
            {\
            float4 pos : SV_POSITION;\
            float4 col : COLOR0;\
            float2 uv  : TEXCOORD0;\
            };\
            \
            PS_INPUT main(VS_INPUT input)\
            {\
            PS_INPUT output;\
            output.pos = mul( ProjectionMatrix, float4(input.pos.xy, 0.f, 1.f));\
            output.col = input.col;\
            output.uv  = input.uv;\
            return output;\
            }";

        ID3DBlob* vertexShaderBlob;
        if (FAILED(D3DCompile(vertexShader, strlen(vertexShader), nullptr, nullptr, nullptr, "main", "vs_4_0", 0, 0, &vertexShaderBlob, nullptr)))
            return false; // NB: Pass ID3DBlob* pErrorBlob to D3DCompile() to get error showing in (const char*)pErrorBlob->GetBufferPointer(). Make sure to Release() the blob!
        if (bd->pd3dDevice->CreateVertexShader(vertexShaderBlob->GetBufferPointer(), vertexShaderBlob->GetBufferSize(), nullptr, &bd->pVertexShader) != S_OK)
        {
            vertexShaderBlob->Release();
            return false;
        }

        // Create the input layout (must match ImDrawVert layout)
        D3D11_INPUT_ELEMENT_DESC local_layout[] =
        {
            { "POSITION", 0, DXGI_FORMAT_R32G32_FLOAT,   0, (UINT)offsetof(ImDrawVert, pos), D3D11_INPUT_PER_VERTEX_DATA, 0 },
            { "TEXCOORD", 0, DXGI_FORMAT_R32G32_FLOAT,   0, (UINT)offsetof(ImDrawVert, uv),  D3D11_INPUT_PER_VERTEX_DATA, 0 },
            { "COLOR",    0, DXGI_FORMAT_R8G8B8A8_UNORM, 0, (UINT)offsetof(ImDrawVert, col), D3D11_INPUT_PER_VERTEX_DATA, 0 },
        };
        if (bd->pd3dDevice->CreateInputLayout(local_layout, 3, vertexShaderBlob->GetBufferPointer(), vertexShaderBlob->GetBufferSize(), &bd->pInputLayout) != S_OK)
        {
            vertexShaderBlob->Release();
            return false;
        }
        vertexShaderBlob->Release();

        // Create the constant buffer (holds the 4x4 projection matrix, written each frame)
        {
            D3D11_BUFFER_DESC desc = {};
            desc.ByteWidth = sizeof(VERTEX_CONSTANT_BUFFER_DX11);
            desc.Usage = D3D11_USAGE_DYNAMIC;
            desc.BindFlags = D3D11_BIND_CONSTANT_BUFFER;
            desc.CPUAccessFlags = D3D11_CPU_ACCESS_WRITE;
            desc.MiscFlags = 0;
            bd->pd3dDevice->CreateBuffer(&desc, nullptr, &bd->pVertexConstantBuffer);
        }
    }

    // Create the pixel shader (modulates vertex color with the sampled texture)
    {
        static const char* pixelShader =
            "struct PS_INPUT\
            {\
            float4 pos : SV_POSITION;\
            float4 col : COLOR0;\
            float2 uv  : TEXCOORD0;\
            };\
            sampler sampler0;\
            Texture2D texture0;\
            \
            float4 main(PS_INPUT input) : SV_Target\
            {\
            float4 out_col = input.col * texture0.Sample(sampler0, input.uv); \
            return out_col; \
            }";

        ID3DBlob* pixelShaderBlob;
        if (FAILED(D3DCompile(pixelShader, strlen(pixelShader), nullptr, nullptr, nullptr, "main", "ps_4_0", 0, 0, &pixelShaderBlob, nullptr)))
            return false; // NB: Pass ID3DBlob* pErrorBlob to D3DCompile() to get error showing in (const char*)pErrorBlob->GetBufferPointer(). Make sure to Release() the blob!
        if (bd->pd3dDevice->CreatePixelShader(pixelShaderBlob->GetBufferPointer(), pixelShaderBlob->GetBufferSize(), nullptr, &bd->pPixelShader) != S_OK)
        {
            pixelShaderBlob->Release();
            return false;
        }
        pixelShaderBlob->Release();
    }

    // Create the blending setup (standard premultiplied-style alpha blend for UI)
    {
        D3D11_BLEND_DESC desc;
        ZeroMemory(&desc, sizeof(desc));
        desc.AlphaToCoverageEnable = false;
        desc.RenderTarget[0].BlendEnable = true;
        desc.RenderTarget[0].SrcBlend = D3D11_BLEND_SRC_ALPHA;
        desc.RenderTarget[0].DestBlend = D3D11_BLEND_INV_SRC_ALPHA;
        desc.RenderTarget[0].BlendOp = D3D11_BLEND_OP_ADD;
        desc.RenderTarget[0].SrcBlendAlpha = D3D11_BLEND_ONE;
        desc.RenderTarget[0].DestBlendAlpha = D3D11_BLEND_INV_SRC_ALPHA;
        desc.RenderTarget[0].BlendOpAlpha = D3D11_BLEND_OP_ADD;
        desc.RenderTarget[0].RenderTargetWriteMask = D3D11_COLOR_WRITE_ENABLE_ALL;
        bd->pd3dDevice->CreateBlendState(&desc, &bd->pBlendState);
    }

    // Create the rasterizer state (no culling; scissor enabled for ImDrawCmd clip rects)
    {
        D3D11_RASTERIZER_DESC desc;
        ZeroMemory(&desc, sizeof(desc));
        desc.FillMode = D3D11_FILL_SOLID;
        desc.CullMode = D3D11_CULL_NONE;
        desc.ScissorEnable = true;
        desc.DepthClipEnable = true;
        bd->pd3dDevice->CreateRasterizerState(&desc, &bd->pRasterizerState);
    }

    // Create depth-stencil State (depth and stencil both disabled — UI is drawn painter-style)
    {
        D3D11_DEPTH_STENCIL_DESC desc;
        ZeroMemory(&desc, sizeof(desc));
        desc.DepthEnable = false;
        desc.DepthWriteMask = D3D11_DEPTH_WRITE_MASK_ALL;
        desc.DepthFunc = D3D11_COMPARISON_ALWAYS;
        desc.StencilEnable = false;
        desc.FrontFace.StencilFailOp = desc.FrontFace.StencilDepthFailOp = desc.FrontFace.StencilPassOp = D3D11_STENCIL_OP_KEEP;
        desc.FrontFace.StencilFunc = D3D11_COMPARISON_ALWAYS;
        desc.BackFace = desc.FrontFace;
        bd->pd3dDevice->CreateDepthStencilState(&desc, &bd->pDepthStencilState);
    }

    // Create texture sampler
    // (Bilinear sampling is required by default. Set 'io.Fonts->Flags |= ImFontAtlasFlags_NoBakedLines' or 'style.AntiAliasedLinesUseTex = false' to allow point/nearest sampling)
    {
        D3D11_SAMPLER_DESC desc;
        ZeroMemory(&desc, sizeof(desc));
        desc.Filter = D3D11_FILTER_MIN_MAG_MIP_LINEAR;
        desc.AddressU = D3D11_TEXTURE_ADDRESS_CLAMP;
        desc.AddressV = D3D11_TEXTURE_ADDRESS_CLAMP;
        desc.AddressW = D3D11_TEXTURE_ADDRESS_CLAMP;
        desc.MipLODBias = 0.f;
        desc.ComparisonFunc = D3D11_COMPARISON_ALWAYS;
        desc.MinLOD = 0.f;
        desc.MaxLOD = 0.f;
        bd->pd3dDevice->CreateSamplerState(&desc, &bd->pTexSamplerLinear);
    }

    return true;
}
void ImGui_ImplDX11_InvalidateDeviceObjects()
{
ImGui_ImplDX11_Data* bd = ImGui_ImplDX11_GetBackendData();
if (!bd->pd3dDevice)
return;
// Destroy all textures
for (ImTextureData* tex : ImGui::GetPlatformIO().Textures)
if (tex->RefCount == 1)
ImGui_ImplDX11_DestroyTexture(tex);
if (bd->pTexSamplerLinear) { bd->pTexSamplerLinear->Release(); bd->pTexSamplerLinear = nullptr; }
if (bd->pIB) { bd->pIB->Release(); bd->pIB = nullptr; }
if (bd->pVB) { bd->pVB->Release(); bd->pVB = nullptr; }
if (bd->pBlendState) { bd->pBlendState->Release(); bd->pBlendState = nullptr; }
if (bd->pDepthStencilState) { bd->pDepthStencilState->Release(); bd->pDepthStencilState = nullptr; }
if (bd->pRasterizerState) { bd->pRasterizerState->Release(); bd->pRasterizerState = nullptr; }
if (bd->pPixelShader) { bd->pPixelShader->Release(); bd->pPixelShader = nullptr; }
if (bd->pVertexConstantBuffer) { bd->pVertexConstantBuffer->Release(); bd->pVertexConstantBuffer = nullptr; }
if (bd->pInputLayout) { bd->pInputLayout->Release(); bd->pInputLayout = nullptr; }
if (bd->pVertexShader) { bd->pVertexShader->Release(); bd->pVertexShader = nullptr; }
}
// Initialize the DX11 renderer backend. On success the backend holds an extra
// reference on 'device' and 'device_context' until ImGui_ImplDX11_Shutdown().
// Returns false if the DXGI factory cannot be retrieved from the device.
bool ImGui_ImplDX11_Init(ID3D11Device* device, ID3D11DeviceContext* device_context)
{
    ImGuiIO& io = ImGui::GetIO();
    IMGUI_CHECKVERSION();
    IM_ASSERT(io.BackendRendererUserData == nullptr && "Already initialized a renderer backend!");

    // Setup backend capabilities flags
    ImGui_ImplDX11_Data* bd = IM_NEW(ImGui_ImplDX11_Data)();
    io.BackendRendererUserData = (void*)bd;
    io.BackendRendererName = "imgui_impl_dx11";
    io.BackendFlags |= ImGuiBackendFlags_RendererHasVtxOffset;  // We can honor the ImDrawCmd::VtxOffset field, allowing for large meshes.
    io.BackendFlags |= ImGuiBackendFlags_RendererHasTextures;   // We can honor ImGuiPlatformIO::Textures[] requests during render.
    io.BackendFlags |= ImGuiBackendFlags_RendererHasViewports;  // We can create multi-viewports on the Renderer side (optional)
    ImGuiPlatformIO& platform_io = ImGui::GetPlatformIO();
    platform_io.Renderer_TextureMaxWidth = platform_io.Renderer_TextureMaxHeight = D3D11_REQ_TEXTURE2D_U_OR_V_DIMENSION;

    // Get factory from device (device -> DXGI device -> adapter -> factory)
    IDXGIDevice* pDXGIDevice = nullptr;
    IDXGIAdapter* pDXGIAdapter = nullptr;
    IDXGIFactory* pFactory = nullptr;
    if (device->QueryInterface(IID_PPV_ARGS(&pDXGIDevice)) == S_OK)
        if (pDXGIDevice->GetParent(IID_PPV_ARGS(&pDXGIAdapter)) == S_OK)
            if (pDXGIAdapter->GetParent(IID_PPV_ARGS(&pFactory)) == S_OK)
            {
                bd->pd3dDevice = device;
                bd->pd3dDeviceContext = device_context;
                bd->pFactory = pFactory;
            }
    if (pDXGIDevice) pDXGIDevice->Release();
    if (pDXGIAdapter) pDXGIAdapter->Release();

    // BUGFIX: previously AddRef() was called through bd->pd3dDevice even when the
    // QueryInterface()/GetParent() chain above failed and left it null, crashing
    // instead of reporting failure. Unwind the partial initialization instead.
    if (bd->pd3dDevice == nullptr)
    {
        IM_ASSERT(0 && "ImGui_ImplDX11_Init(): failed to retrieve DXGI factory from device!");
        io.BackendRendererName = nullptr;
        io.BackendRendererUserData = nullptr;
        io.BackendFlags &= ~(ImGuiBackendFlags_RendererHasVtxOffset | ImGuiBackendFlags_RendererHasTextures | ImGuiBackendFlags_RendererHasViewports);
        IM_DELETE(bd);
        return false;
    }
    bd->pd3dDevice->AddRef();
    bd->pd3dDeviceContext->AddRef();

    ImGui_ImplDX11_InitMultiViewportSupport();
    return true;
}
// Shut down the backend: destroy device objects, drop references taken in
// Init(), and detach the backend from the ImGui context.
void ImGui_ImplDX11_Shutdown()
{
    ImGui_ImplDX11_Data* bd = ImGui_ImplDX11_GetBackendData();
    IM_ASSERT(bd != nullptr && "No renderer backend to shutdown, or already shutdown?");
    ImGuiIO& io = ImGui::GetIO();
    ImGuiPlatformIO& platform_io = ImGui::GetPlatformIO();

    // Tear down in reverse order of initialization.
    ImGui_ImplDX11_ShutdownMultiViewportSupport();
    ImGui_ImplDX11_InvalidateDeviceObjects();
    if (bd->pFactory)
        bd->pFactory->Release();
    if (bd->pd3dDevice)
        bd->pd3dDevice->Release();
    if (bd->pd3dDeviceContext)
        bd->pd3dDeviceContext->Release();

    // Detach from the ImGui context.
    io.BackendRendererName = nullptr;
    io.BackendRendererUserData = nullptr;
    io.BackendFlags &= ~(ImGuiBackendFlags_RendererHasVtxOffset | ImGuiBackendFlags_RendererHasTextures | ImGuiBackendFlags_RendererHasViewports);
    platform_io.ClearRendererHandlers();
    IM_DELETE(bd);
}
// Per-frame hook: lazily (re)creates device objects on the first frame or
// after ImGui_ImplDX11_InvalidateDeviceObjects() has been called.
void ImGui_ImplDX11_NewFrame()
{
    ImGui_ImplDX11_Data* bd = ImGui_ImplDX11_GetBackendData();
    IM_ASSERT(bd != nullptr && "Context or backend not initialized! Did you call ImGui_ImplDX11_Init()?");

    if (bd->pVertexShader == nullptr && !ImGui_ImplDX11_CreateDeviceObjects())
        IM_ASSERT(0 && "ImGui_ImplDX11_CreateDeviceObjects() failed!");
}
//--------------------------------------------------------------------------------------------------------
// MULTI-VIEWPORT / PLATFORM INTERFACE SUPPORT
// This is an _advanced_ and _optional_ feature, allowing the backend to create and handle multiple viewports simultaneously.
// If you are new to dear imgui or creating a new binding for dear imgui, it is recommended that you completely ignore this section first.
//--------------------------------------------------------------------------------------------------------
// Helper structure we store in the void* RendererUserData field of each ImGuiViewport to easily retrieve our backend data.
// Helper structure we store in the void* RendererUserData field of each ImGuiViewport to easily retrieve our backend data.
struct ImGui_ImplDX11_ViewportData
{
    IDXGISwapChain*         SwapChain = nullptr;    // Per-viewport swap chain
    ID3D11RenderTargetView* RTView = nullptr;       // Render target view on the swap chain's back buffer
    ImGui_ImplDX11_ViewportData() = default;
    ~ImGui_ImplDX11_ViewportData() { IM_ASSERT(SwapChain == nullptr && RTView == nullptr); } // Must be released via DestroyWindow() before deletion
};
// Multi-Viewports: configure templates used when creating swapchains for secondary viewports. Will try them in order.
// This is intentionally not declared in the .h file yet, so you will need to copy this declaration:
void ImGui_ImplDX11_SetSwapChainDescs(const DXGI_SWAP_CHAIN_DESC* desc_templates, int desc_templates_count);
void ImGui_ImplDX11_SetSwapChainDescs(const DXGI_SWAP_CHAIN_DESC* desc_templates, int desc_templates_count)
{
ImGui_ImplDX11_Data* bd = ImGui_ImplDX11_GetBackendData();
bd->SwapChainDescsForViewports.resize(desc_templates_count);
memcpy(bd->SwapChainDescsForViewports.Data, desc_templates, sizeof(DXGI_SWAP_CHAIN_DESC));
}
// Renderer callback: create a swap chain + render target for a secondary viewport.
static void ImGui_ImplDX11_CreateWindow(ImGuiViewport* viewport)
{
    ImGui_ImplDX11_Data* bd = ImGui_ImplDX11_GetBackendData();
    ImGui_ImplDX11_ViewportData* vd = IM_NEW(ImGui_ImplDX11_ViewportData)();
    viewport->RendererUserData = vd;

    // PlatformHandleRaw should always be a HWND, whereas PlatformHandle might be a higher-level handle (e.g. GLFWWindow*, SDL's WindowID).
    // Some backends will leave PlatformHandleRaw == 0, in which case we assume PlatformHandle will contain the HWND.
    HWND hwnd = (HWND)(viewport->PlatformHandleRaw ? viewport->PlatformHandleRaw : viewport->PlatformHandle);
    IM_ASSERT(hwnd != 0);
    IM_ASSERT(vd->SwapChain == nullptr && vd->RTView == nullptr);

    // Try each swap-chain template in order until one succeeds.
    HRESULT hr = DXGI_ERROR_UNSUPPORTED;
    for (int n = 0; n < bd->SwapChainDescsForViewports.Size && FAILED(hr); n++)
    {
        const DXGI_SWAP_CHAIN_DESC& sd_template = bd->SwapChainDescsForViewports[n];
        IM_ASSERT(sd_template.BufferDesc.Width == 0 && sd_template.BufferDesc.Height == 0 && sd_template.OutputWindow == nullptr);
        DXGI_SWAP_CHAIN_DESC sd = sd_template;
        sd.BufferDesc.Width = (UINT)viewport->Size.x;
        sd.BufferDesc.Height = (UINT)viewport->Size.y;
        sd.OutputWindow = hwnd;
        hr = bd->pFactory->CreateSwapChain(bd->pd3dDevice, &sd, &vd->SwapChain);
    }
    IM_ASSERT(SUCCEEDED(hr));
    bd->pFactory->MakeWindowAssociation(hwnd, DXGI_MWA_NO_ALT_ENTER | DXGI_MWA_NO_WINDOW_CHANGES); // Disable e.g. Alt+Enter

    // Create the render target view on the back buffer.
    if (vd->SwapChain != nullptr)
    {
        ID3D11Texture2D* back_buffer;
        vd->SwapChain->GetBuffer(0, IID_PPV_ARGS(&back_buffer));
        bd->pd3dDevice->CreateRenderTargetView(back_buffer, nullptr, &vd->RTView);
        back_buffer->Release();
    }
}
// Renderer callback: release the swap chain + render target of a secondary viewport.
static void ImGui_ImplDX11_DestroyWindow(ImGuiViewport* viewport)
{
    // The main viewport (owned by the application) will always have RendererUserData == nullptr since we didn't create the data for it.
    ImGui_ImplDX11_ViewportData* vd = (ImGui_ImplDX11_ViewportData*)viewport->RendererUserData;
    if (vd != nullptr)
    {
        if (vd->SwapChain != nullptr)
            vd->SwapChain->Release();
        if (vd->RTView != nullptr)
            vd->RTView->Release();
        vd->SwapChain = nullptr;    // ~ImGui_ImplDX11_ViewportData() asserts both are null
        vd->RTView = nullptr;
        IM_DELETE(vd);
    }
    viewport->RendererUserData = nullptr;
}
// Renderer callback: resize a secondary viewport's swap-chain buffers.
static void ImGui_ImplDX11_SetWindowSize(ImGuiViewport* viewport, ImVec2 size)
{
    ImGui_ImplDX11_Data* bd = ImGui_ImplDX11_GetBackendData();
    ImGui_ImplDX11_ViewportData* vd = (ImGui_ImplDX11_ViewportData*)viewport->RendererUserData;

    // The render target view must be released before ResizeBuffers() can succeed.
    if (vd->RTView != nullptr)
    {
        vd->RTView->Release();
        vd->RTView = nullptr;
    }
    if (vd->SwapChain == nullptr)
        return;

    vd->SwapChain->ResizeBuffers(0, (UINT)size.x, (UINT)size.y, DXGI_FORMAT_UNKNOWN, 0);
    ID3D11Texture2D* back_buffer = nullptr;
    vd->SwapChain->GetBuffer(0, IID_PPV_ARGS(&back_buffer));
    if (back_buffer == nullptr)
    {
        fprintf(stderr, "ImGui_ImplDX11_SetWindowSize() failed creating buffers.\n");
        return;
    }
    bd->pd3dDevice->CreateRenderTargetView(back_buffer, nullptr, &vd->RTView);
    back_buffer->Release();
}
// Renderer callback: render a secondary viewport's draw data into its render target.
static void ImGui_ImplDX11_RenderWindow(ImGuiViewport* viewport, void*)
{
    ImGui_ImplDX11_Data* bd = ImGui_ImplDX11_GetBackendData();
    ImGui_ImplDX11_ViewportData* vd = (ImGui_ImplDX11_ViewportData*)viewport->RendererUserData;

    // Bind the viewport's render target (no depth buffer needed for UI).
    bd->pd3dDeviceContext->OMSetRenderTargets(1, &vd->RTView, nullptr);

    // Clear to opaque black unless the application asked to keep its own contents.
    if ((viewport->Flags & ImGuiViewportFlags_NoRendererClear) == 0)
    {
        ImVec4 clear_color = ImVec4(0.0f, 0.0f, 0.0f, 1.0f);
        bd->pd3dDeviceContext->ClearRenderTargetView(vd->RTView, (float*)&clear_color);
    }
    ImGui_ImplDX11_RenderDrawData(viewport->DrawData);
}
// Renderer callback: present a secondary viewport's swap chain.
static void ImGui_ImplDX11_SwapBuffers(ImGuiViewport* viewport, void*)
{
    ImGui_ImplDX11_ViewportData* vd = (ImGui_ImplDX11_ViewportData*)viewport->RendererUserData;
    if (vd->SwapChain != nullptr)
        vd->SwapChain->Present(0, 0); // Present without vsync
}
// Register the renderer-side multi-viewport callbacks and install the default
// swap-chain template used for secondary viewport windows.
static void ImGui_ImplDX11_InitMultiViewportSupport()
{
    ImGuiPlatformIO& platform_io = ImGui::GetPlatformIO();
    platform_io.Renderer_CreateWindow  = ImGui_ImplDX11_CreateWindow;
    platform_io.Renderer_DestroyWindow = ImGui_ImplDX11_DestroyWindow;
    platform_io.Renderer_SetWindowSize = ImGui_ImplDX11_SetWindowSize;
    platform_io.Renderer_RenderWindow  = ImGui_ImplDX11_RenderWindow;
    platform_io.Renderer_SwapBuffers   = ImGui_ImplDX11_SwapBuffers;

    // Default swapchain format: 32-bit RGBA, no MSAA, windowed, discard on present.
    DXGI_SWAP_CHAIN_DESC sd = {};
    sd.BufferDesc.Format = DXGI_FORMAT_R8G8B8A8_UNORM;
    sd.SampleDesc.Count = 1;
    sd.SampleDesc.Quality = 0;
    sd.BufferUsage = DXGI_USAGE_RENDER_TARGET_OUTPUT;
    sd.BufferCount = 1;
    sd.Windowed = TRUE;
    sd.SwapEffect = DXGI_SWAP_EFFECT_DISCARD;
    sd.Flags = 0;
    ImGui_ImplDX11_SetSwapChainDescs(&sd, 1);
}
// Destroy all platform windows created for secondary viewports.
static void ImGui_ImplDX11_ShutdownMultiViewportSupport()
{
    ImGui::DestroyPlatformWindows();
}
//-----------------------------------------------------------------------------
#endif // #ifndef IMGUI_DISABLE

View File

@ -0,0 +1,52 @@
// dear imgui: Renderer Backend for DirectX11
// This needs to be used along with a Platform Backend (e.g. Win32)
// Implemented features:
// [X] Renderer: User texture binding. Use 'ID3D11ShaderResourceView*' as texture identifier. Read the FAQ about ImTextureID/ImTextureRef!
// [X] Renderer: Large meshes support (64k+ vertices) even with 16-bit indices (ImGuiBackendFlags_RendererHasVtxOffset).
// [X] Renderer: Texture updates support for dynamic font atlas (ImGuiBackendFlags_RendererHasTextures).
// [X] Renderer: Expose selected render state for draw callbacks to use. Access in '(ImGui_ImplXXXX_RenderState*)GetPlatformIO().Renderer_RenderState'.
// [X] Renderer: Multi-viewport support (multiple windows). Enable with 'io.ConfigFlags |= ImGuiConfigFlags_ViewportsEnable'.
// You can use unmodified imgui_impl_* files in your project. See examples/ folder for examples of using this.
// Prefer including the entire imgui/ repository into your project (either as a copy or as a submodule), and only build the backends you need.
// Learn about Dear ImGui:
// - FAQ https://dearimgui.com/faq
// - Getting Started https://dearimgui.com/getting-started
// - Documentation https://dearimgui.com/docs (same as your local docs/ folder).
// - Introduction, links and more at the top of imgui.cpp
#pragma once
#include "imgui.h" // IMGUI_IMPL_API
#ifndef IMGUI_DISABLE
struct ID3D11Device;
struct ID3D11DeviceContext;
struct ID3D11SamplerState;
struct ID3D11Buffer;
// Follow "Getting Started" link and check examples/ folder to learn about using backends!
IMGUI_IMPL_API bool ImGui_ImplDX11_Init(ID3D11Device* device, ID3D11DeviceContext* device_context);
IMGUI_IMPL_API void ImGui_ImplDX11_Shutdown();
IMGUI_IMPL_API void ImGui_ImplDX11_NewFrame();
IMGUI_IMPL_API void ImGui_ImplDX11_RenderDrawData(ImDrawData* draw_data);
// Use if you want to reset your rendering device without losing Dear ImGui state.
IMGUI_IMPL_API bool ImGui_ImplDX11_CreateDeviceObjects();
IMGUI_IMPL_API void ImGui_ImplDX11_InvalidateDeviceObjects();
// (Advanced) Use e.g. if you need to precisely control the timing of texture updates (e.g. for staged rendering), by setting ImDrawData::Textures = NULL to handle this manually.
IMGUI_IMPL_API void ImGui_ImplDX11_UpdateTexture(ImTextureData* tex);
// [BETA] Selected render state data shared with callbacks.
// This is temporarily stored in GetPlatformIO().Renderer_RenderState during the ImGui_ImplDX11_RenderDrawData() call.
// (Please open an issue if you feel you need access to more data)
struct ImGui_ImplDX11_RenderState
{
    ID3D11Device*        Device;                 // Device used by the backend
    ID3D11DeviceContext* DeviceContext;          // Context on which draw commands are issued
    ID3D11SamplerState*  SamplerDefault;         // Sampler bound by default before rendering
    ID3D11Buffer*        VertexConstantBuffer;   // Constant buffer bound at vertex slot b0 (projection matrix)
};
#endif // #ifndef IMGUI_DISABLE

File diff suppressed because it is too large Load Diff

View File

@ -0,0 +1,54 @@
// dear imgui: Platform Backend for Windows (standard windows API for 32-bits AND 64-bits applications)
// This needs to be used along with a Renderer (e.g. DirectX11, OpenGL3, Vulkan..)
// Implemented features:
// [X] Platform: Clipboard support (for Win32 this is actually part of core dear imgui)
// [X] Platform: Mouse support. Can discriminate Mouse/TouchScreen/Pen.
// [X] Platform: Keyboard support. Since 1.87 we are using the io.AddKeyEvent() function. Pass ImGuiKey values to all key functions e.g. ImGui::IsKeyPressed(ImGuiKey_Space). [Legacy VK_* values are obsolete since 1.87 and not supported since 1.91.5]
// [X] Platform: Gamepad support.
// [X] Platform: Mouse cursor shape and visibility (ImGuiBackendFlags_HasMouseCursors). Disable with 'io.ConfigFlags |= ImGuiConfigFlags_NoMouseCursorChange'.
// [X] Platform: Multi-viewport support (multiple windows). Enable with 'io.ConfigFlags |= ImGuiConfigFlags_ViewportsEnable'.
// You can use unmodified imgui_impl_* files in your project. See examples/ folder for examples of using this.
// Prefer including the entire imgui/ repository into your project (either as a copy or as a submodule), and only build the backends you need.
// Learn about Dear ImGui:
// - FAQ https://dearimgui.com/faq
// - Getting Started https://dearimgui.com/getting-started
// - Documentation https://dearimgui.com/docs (same as your local docs/ folder).
// - Introduction, links and more at the top of imgui.cpp
#pragma once
#include "imgui.h" // IMGUI_IMPL_API
#ifndef IMGUI_DISABLE
// Follow "Getting Started" link and check examples/ folder to learn about using backends!
IMGUI_IMPL_API bool ImGui_ImplWin32_Init(void* hwnd);
IMGUI_IMPL_API bool ImGui_ImplWin32_InitForOpenGL(void* hwnd);
IMGUI_IMPL_API void ImGui_ImplWin32_Shutdown();
IMGUI_IMPL_API void ImGui_ImplWin32_NewFrame();
// Win32 message handler your application needs to call.
// - Intentionally commented out in a '#if 0' block to avoid dragging dependencies on <windows.h> from this helper.
// - You should COPY the line below into your .cpp code to forward declare the function and then you can call it.
// - Call from your application's message handler. Keep calling your message handler unless this function returns TRUE.
#if 0
extern IMGUI_IMPL_API LRESULT ImGui_ImplWin32_WndProcHandler(HWND hWnd, UINT msg, WPARAM wParam, LPARAM lParam);
#endif
// DPI-related helpers (optional)
// - Use to enable DPI awareness without having to create an application manifest.
// - Your own app may already do this via a manifest or explicit calls. This is mostly useful for our examples/ apps.
// - In theory we could call simple functions from Windows SDK such as SetProcessDPIAware(), SetProcessDpiAwareness(), etc.
// but most of the functions provided by Microsoft require Windows 8.1/10+ SDK at compile time and Windows 8/10+ at runtime,
// neither we want to require the user to have. So we dynamically select and load those functions to avoid dependencies.
IMGUI_IMPL_API void ImGui_ImplWin32_EnableDpiAwareness();
IMGUI_IMPL_API float ImGui_ImplWin32_GetDpiScaleForHwnd(void* hwnd); // HWND hwnd
IMGUI_IMPL_API float ImGui_ImplWin32_GetDpiScaleForMonitor(void* monitor); // HMONITOR monitor
// Transparency related helpers (optional) [experimental]
// - Use to enable alpha compositing transparency with the desktop.
// - Use together with e.g. clearing your framebuffer with zero-alpha.
IMGUI_IMPL_API void ImGui_ImplWin32_EnableAlphaCompositing(void* hwnd); // HWND hwnd
#endif // #ifndef IMGUI_DISABLE

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

View File

@ -0,0 +1,627 @@
// [DEAR IMGUI]
// This is a slightly modified version of stb_rect_pack.h 1.01.
// Grep for [DEAR IMGUI] to find the changes.
//
// stb_rect_pack.h - v1.01 - public domain - rectangle packing
// Sean Barrett 2014
//
// Useful for e.g. packing rectangular textures into an atlas.
// Does not do rotation.
//
// Before #including,
//
// #define STB_RECT_PACK_IMPLEMENTATION
//
// in the file that you want to have the implementation.
//
// Not necessarily the awesomest packing method, but better than
// the totally naive one in stb_truetype (which is primarily what
// this is meant to replace).
//
// Has only had a few tests run, may have issues.
//
// More docs to come.
//
// No memory allocations; uses qsort() and assert() from stdlib.
// Can override those by defining STBRP_SORT and STBRP_ASSERT.
//
// This library currently uses the Skyline Bottom-Left algorithm.
//
// Please note: better rectangle packers are welcome! Please
// implement them to the same API, but with a different init
// function.
//
// Credits
//
// Library
// Sean Barrett
// Minor features
// Martins Mozeiko
// github:IntellectualKitty
//
// Bugfixes / warning fixes
// Jeremy Jaussaud
// Fabian Giesen
//
// Version history:
//
// 1.01 (2021-07-11) always use large rect mode, expose STBRP__MAXVAL in public section
// 1.00 (2019-02-25) avoid small space waste; gracefully fail too-wide rectangles
// 0.99 (2019-02-07) warning fixes
// 0.11 (2017-03-03) return packing success/fail result
// 0.10 (2016-10-25) remove cast-away-const to avoid warnings
// 0.09 (2016-08-27) fix compiler warnings
// 0.08 (2015-09-13) really fix bug with empty rects (w=0 or h=0)
// 0.07 (2015-09-13) fix bug with empty rects (w=0 or h=0)
// 0.06 (2015-04-15) added STBRP_SORT to allow replacing qsort
// 0.05: added STBRP_ASSERT to allow replacing assert
// 0.04: fixed minor bug in STBRP_LARGE_RECTS support
// 0.01: initial release
//
// LICENSE
//
// See end of file for license information.
//////////////////////////////////////////////////////////////////////////////
//
// INCLUDE SECTION
//
#ifndef STB_INCLUDE_STB_RECT_PACK_H
#define STB_INCLUDE_STB_RECT_PACK_H
#define STB_RECT_PACK_VERSION 1
#ifdef STBRP_STATIC
#define STBRP_DEF static
#else
#define STBRP_DEF extern
#endif
#ifdef __cplusplus
extern "C" {
#endif
typedef struct stbrp_context stbrp_context;
typedef struct stbrp_node stbrp_node;
typedef struct stbrp_rect stbrp_rect;
typedef int stbrp_coord;
#define STBRP__MAXVAL 0x7fffffff
// Mostly for internal use, but this is the maximum supported coordinate value.
STBRP_DEF int stbrp_pack_rects (stbrp_context *context, stbrp_rect *rects, int num_rects);
// Assign packed locations to rectangles. The rectangles are of type
// 'stbrp_rect' defined below, stored in the array 'rects', and there
// are 'num_rects' many of them.
//
// Rectangles which are successfully packed have the 'was_packed' flag
// set to a non-zero value and 'x' and 'y' store the minimum location
// on each axis (i.e. bottom-left in cartesian coordinates, top-left
// if you imagine y increasing downwards). Rectangles which do not fit
// have the 'was_packed' flag set to 0.
//
// You should not try to access the 'rects' array from another thread
// while this function is running, as the function temporarily reorders
// the array while it executes.
//
// To pack into another rectangle, you need to call stbrp_init_target
// again. To continue packing into the same rectangle, you can call
// this function again. Calling this multiple times with multiple rect
// arrays will probably produce worse packing results than calling it
// a single time with the full rectangle array, but the option is
// available.
//
// The function returns 1 if all of the rectangles were successfully
// packed and 0 otherwise.
// One rectangle to pack: fill 'w'/'h' before calling stbrp_pack_rects();
// 'x', 'y' and 'was_packed' are written back by the packer.
struct stbrp_rect
{
   // reserved for your use:
   int id;

   // input:
   stbrp_coord w, h;

   // output:
   stbrp_coord x, y;
   int was_packed;  // non-zero if valid packing

}; // 16 bytes, nominally
STBRP_DEF void stbrp_init_target (stbrp_context *context, int width, int height, stbrp_node *nodes, int num_nodes);
// Initialize a rectangle packer to:
// pack a rectangle that is 'width' by 'height' in dimensions
// using temporary storage provided by the array 'nodes', which is 'num_nodes' long
//
// You must call this function every time you start packing into a new target.
//
// There is no "shutdown" function. The 'nodes' memory must stay valid for
// the following stbrp_pack_rects() call (or calls), but can be freed after
// the call (or calls) finish.
//
// Note: to guarantee best results, either:
// 1. make sure 'num_nodes' >= 'width'
// or 2. call stbrp_allow_out_of_mem() defined below with 'allow_out_of_mem = 1'
//
// If you don't do either of the above things, widths will be quantized to multiples
// of small integers to guarantee the algorithm doesn't run out of temporary storage.
//
// If you do #2, then the non-quantized algorithm will be used, but the algorithm
// may run out of temporary storage and be unable to pack some rectangles.
STBRP_DEF void stbrp_setup_allow_out_of_mem (stbrp_context *context, int allow_out_of_mem);
// Optionally call this function after init but before doing any packing to
// change the handling of the out-of-temp-memory scenario, described above.
// If you call init again, this will be reset to the default (false).
STBRP_DEF void stbrp_setup_heuristic (stbrp_context *context, int heuristic);
// Optionally select which packing heuristic the library should use. Different
// heuristics will produce better/worse results for different data sets.
// If you call init again, this will be reset to the default.
// Packing heuristics selectable via stbrp_setup_heuristic().
// BL = bottom-left placement, BF = best-fit placement (both sort by height).
enum
{
   STBRP_HEURISTIC_Skyline_default=0,
   STBRP_HEURISTIC_Skyline_BL_sortHeight = STBRP_HEURISTIC_Skyline_default,
   STBRP_HEURISTIC_Skyline_BF_sortHeight
};
//////////////////////////////////////////////////////////////////////////////
//
// the details of the following structures don't matter to you, but they must
// be visible so you can handle the memory allocations for them
// One node of the skyline linked list. Exposed only so callers can allocate
// the temporary node array passed to stbrp_init_target().
struct stbrp_node
{
   stbrp_coord x,y;     // segment start position and height
   stbrp_node  *next;   // next segment in the list (ordered by x)
};
// Packer state. Exposed only so callers can allocate it; treat fields as private.
struct stbrp_context
{
   int width;               // target rectangle width
   int height;              // target rectangle height
   int align;               // width quantization step (see stbrp_setup_allow_out_of_mem)
   int init_mode;           // which packer was initialized (STBRP__INIT_skyline)
   int heuristic;           // active STBRP_HEURISTIC_* value
   int num_nodes;           // number of caller-provided temp nodes
   stbrp_node *active_head; // current skyline list
   stbrp_node *free_head;   // unused nodes available for splitting segments
   stbrp_node extra[2]; // we allocate two extra nodes so optimal user-node-count is 'width' not 'width+2'
};
#ifdef __cplusplus
}
#endif
#endif
//////////////////////////////////////////////////////////////////////////////
//
// IMPLEMENTATION SECTION
//
#ifdef STB_RECT_PACK_IMPLEMENTATION
#ifndef STBRP_SORT
#include <stdlib.h>
#define STBRP_SORT qsort
#endif
#ifndef STBRP_ASSERT
#include <assert.h>
#define STBRP_ASSERT assert
#endif
#ifdef _MSC_VER
#define STBRP__NOTUSED(v) (void)(v)
#define STBRP__CDECL __cdecl
#else
#define STBRP__NOTUSED(v) (void)sizeof(v)
#define STBRP__CDECL
#endif
enum
{
STBRP__INIT_skyline = 1
};
// Select the packing heuristic. Must be called after stbrp_init_target();
// asserts if the context was not initialized or the heuristic is unknown.
STBRP_DEF void stbrp_setup_heuristic(stbrp_context *context, int heuristic)
{
   if (context->init_mode == STBRP__INIT_skyline) {
      STBRP_ASSERT(heuristic == STBRP_HEURISTIC_Skyline_BL_sortHeight || heuristic == STBRP_HEURISTIC_Skyline_BF_sortHeight);
      context->heuristic = heuristic;
   } else {
      // Only the skyline packer exists; any other init_mode is a usage error.
      STBRP_ASSERT(0);
   }
}
// Choose how the packer trades packing quality against the risk of running out
// of temporary node storage (see header comment on this function).
STBRP_DEF void stbrp_setup_allow_out_of_mem(stbrp_context *context, int allow_out_of_mem)
{
   if (!allow_out_of_mem) {
      // if it's not ok to run out of memory, then quantize the widths
      // so that num_nodes is always enough nodes.
      //
      // I.e. num_nodes * align >= width
      //                  align >= width / num_nodes
      //                  align = ceil(width/num_nodes)
      context->align = (context->width + context->num_nodes-1) / context->num_nodes;
      return;
   }
   // if it's ok to run out of memory, then don't bother aligning them;
   // this gives better packing, but may fail due to OOM (even though
   // the rectangles easily fit). @TODO a smarter approach would be to only
   // quantize once we've hit OOM, then we could get rid of this parameter.
   context->align = 1;
}
// Prepare 'context' to pack into a 'width' x 'height' target, using the
// caller-provided 'nodes' array as temporary storage (see header comment).
STBRP_DEF void stbrp_init_target(stbrp_context *context, int width, int height, stbrp_node *nodes, int num_nodes)
{
   int i;

   // Chain the caller-provided nodes into the free list.
   for (i=0; i < num_nodes-1; ++i)
      nodes[i].next = &nodes[i+1];
   nodes[i].next = NULL;
   context->free_head = &nodes[0];

   context->init_mode = STBRP__INIT_skyline;
   context->heuristic = STBRP_HEURISTIC_Skyline_default;
   context->width     = width;
   context->height    = height;
   context->num_nodes = num_nodes;
   context->active_head = &context->extra[0];
   stbrp_setup_allow_out_of_mem(context, 0);

   // node 0 is the full width, node 1 is the sentinel (lets us not store width explicitly)
   context->extra[0].x = 0;
   context->extra[0].y = 0;
   context->extra[0].next = &context->extra[1];
   context->extra[1].x = (stbrp_coord) width;
   context->extra[1].y = (1<<30);   // "infinite" height so nothing fits past the right edge
   context->extra[1].next = NULL;
}
// Find the minimum y at which a rect of 'width' whose left edge is at x0 can
// rest on the skyline, scanning from 'first' (which must contain x0).
// The answer is the maximum node height under [x0, x0+width). *pwaste
// receives the area wasted underneath the rect (gaps between lower skyline
// segments and the returned y).
static int stbrp__skyline_find_min_y(stbrp_context *c, stbrp_node *first, int x0, int width, int *pwaste)
{
   stbrp_node *node = first;
   int x1 = x0 + width;   // right edge of the candidate rect
   int min_y, visited_width, waste_area;
   STBRP__NOTUSED(c);     // context is unused here; kept for signature symmetry
   STBRP_ASSERT(first->x <= x0);
   #if 0
   // skip in case we're past the node
   while (node->next->x <= x0)
      ++node;
   #else
   STBRP_ASSERT(node->next->x > x0); // we ended up handling this in the caller for efficiency
   #endif
   STBRP_ASSERT(node->x <= x0);
   min_y = 0;
   waste_area = 0;
   visited_width = 0;
   // Walk every skyline node overlapped by [x0, x1).
   while (node->x < x1) {
      if (node->y > min_y) {
         // raise min_y higher.
         // we've accounted for all waste up to min_y,
         // but we'll now add more waste for everything we've visited
         waste_area += visited_width * (node->y - min_y);
         min_y = node->y;
         // the first time through, visited_width might be reduced
         // (the node can start left of x0)
         if (node->x < x0)
            visited_width += node->next->x - x0;
         else
            visited_width += node->next->x - node->x;
      } else {
         // this node sits below the current resting height: add waste area,
         // clamped so we never count width beyond the rect's right edge
         int under_width = node->next->x - node->x;
         if (under_width + visited_width > width)
            under_width = width - visited_width;
         waste_area += under_width * (min_y - node->y);
         visited_width += under_width;
      }
      node = node->next;
   }
   *pwaste = waste_area;
   return min_y;
}
// Result of a placement query: the chosen (x, y) position, plus the link in
// the active skyline list that precedes the first node covered by the
// placement. prev_link == NULL means the query failed (rect cannot fit).
typedef struct
{
   int x,y;                  // placement position (top-left)
   stbrp_node **prev_link;   // insertion point in the active list, or NULL
} stbrp__findresult;
// Search the active skyline for the best position for a width x height rect
// according to c->heuristic (bottom-left, or best-fit = least wasted area).
// Returns the chosen position and the link preceding the insertion point;
// fr.prev_link == NULL means the rect cannot fit at all.
static stbrp__findresult stbrp__skyline_find_best_pos(stbrp_context *c, int width, int height)
{
   int best_waste = (1<<30), best_x, best_y = (1 << 30);
   stbrp__findresult fr;
   stbrp_node **prev, *node, *tail, **best = NULL;
   // align to multiple of c->align
   width = (width + c->align - 1);
   width -= width % c->align;
   STBRP_ASSERT(width % c->align == 0);
   // if it can't possibly fit, bail immediately
   if (width > c->width || height > c->height) {
      fr.prev_link = NULL;
      fr.x = fr.y = 0;
      return fr;
   }
   // Pass 1: try resting the rect's LEFT edge on the start of each skyline node.
   node = c->active_head;
   prev = &c->active_head;
   while (node->x + width <= c->width) {
      int y,waste;
      y = stbrp__skyline_find_min_y(c, node, node->x, width, &waste);
      if (c->heuristic == STBRP_HEURISTIC_Skyline_BL_sortHeight) { // actually just want to test BL
         // bottom left
         if (y < best_y) {
            best_y = y;
            best = prev;
         }
      } else {
         // best-fit
         if (y + height <= c->height) {
            // can only use it if it fits vertically
            if (y < best_y || (y == best_y && waste < best_waste)) {
               best_y = y;
               best_waste = waste;
               best = prev;
            }
         }
      }
      prev = &node->next;
      node = node->next;
   }
   best_x = (best == NULL) ? 0 : (*best)->x;
   // if doing best-fit (BF), we also have to try aligning right edge to each node position
   //
   // e.g, if fitting
   //
   //     ____________________
   //    |____________________|
   //
   // into
   //
   //   |                         |
   //   |             ____________|
   //   |____________|
   //
   // then right-aligned reduces waste, but bottom-left BL always chooses left-aligned
   //
   // This makes BF take about 2x the time
   if (c->heuristic == STBRP_HEURISTIC_Skyline_BF_sortHeight) {
      tail = c->active_head;
      node = c->active_head;
      prev = &c->active_head;
      // find first node that's admissible
      while (tail->x < width)
         tail = tail->next;
      // Pass 2: try resting the rect's RIGHT edge against each node's x.
      while (tail) {
         int xpos = tail->x - width;
         int y,waste;
         STBRP_ASSERT(xpos >= 0);
         // find the left position that matches this
         while (node->next->x <= xpos) {
            prev = &node->next;
            node = node->next;
         }
         STBRP_ASSERT(node->next->x > xpos && node->x <= xpos);
         y = stbrp__skyline_find_min_y(c, node, xpos, width, &waste);
         if (y + height <= c->height) {
            if (y <= best_y) {
               if (y < best_y || waste < best_waste || (waste==best_waste && xpos < best_x)) {
                  best_x = xpos;
                  //STBRP_ASSERT(y <= best_y); [DEAR IMGUI]
                  best_y = y;
                  best_waste = waste;
                  best = prev;
               }
            }
         }
         tail = tail->next;
      }
   }
   fr.prev_link = best;
   fr.x = best_x;
   fr.y = best_y;
   return fr;
}
// Place one width x height rect: find the best position, then splice a new
// node into the skyline at that position, recycling any nodes the new rect
// completely covers back onto the free list. Returns fr with
// prev_link == NULL on failure (no fit, or node pool exhausted).
static stbrp__findresult stbrp__skyline_pack_rectangle(stbrp_context *context, int width, int height)
{
   // find best position according to heuristic
   stbrp__findresult res = stbrp__skyline_find_best_pos(context, width, height);
   stbrp_node *node, *cur;
   // bail if:
   //    1. it failed
   //    2. the best node doesn't fit (we don't always check this)
   //    3. we're out of memory
   if (res.prev_link == NULL || res.y + height > context->height || context->free_head == NULL) {
      res.prev_link = NULL;
      return res;
   }
   // on success, create new node
   node = context->free_head;
   node->x = (stbrp_coord) res.x;
   node->y = (stbrp_coord) (res.y + height);
   context->free_head = node->next;
   // insert the new node into the right starting point, and
   // let 'cur' point to the remaining nodes needing to be
   // stitched back in
   cur = *res.prev_link;
   if (cur->x < res.x) {
      // preserve the existing one, so start testing with the next one
      stbrp_node *next = cur->next;
      cur->next = node;
      cur = next;
   } else {
      *res.prev_link = node;
   }
   // from here, traverse cur and free the nodes, until we get to one
   // that shouldn't be freed
   while (cur->next && cur->next->x <= res.x + width) {
      stbrp_node *next = cur->next;
      // move the current node to the free list
      cur->next = context->free_head;
      context->free_head = cur;
      cur = next;
   }
   // stitch the list back in
   node->next = cur;
   // if the surviving node was partially covered, trim its left edge
   if (cur->x < res.x + width)
      cur->x = (stbrp_coord) (res.x + width);
   // Debug builds: verify the skyline is strictly increasing in x and that
   // every node is accounted for (active + free + 2 built-in extras).
#ifdef _DEBUG
   cur = context->active_head;
   while (cur->x < context->width) {
      STBRP_ASSERT(cur->x < cur->next->x);
      cur = cur->next;
   }
   STBRP_ASSERT(cur->next == NULL);
   {
      int count=0;
      cur = context->active_head;
      while (cur) {
         cur = cur->next;
         ++count;
      }
      cur = context->free_head;
      while (cur) {
         cur = cur->next;
         ++count;
      }
      STBRP_ASSERT(count == context->num_nodes+2);
   }
#endif
   return res;
}
// qsort comparator: sort rects tallest-first, breaking height ties by
// wider-first, so the skyline packer sees the hardest rects earliest.
static int STBRP__CDECL rect_height_compare(const void *a, const void *b)
{
   const stbrp_rect *p = (const stbrp_rect *) a;
   const stbrp_rect *q = (const stbrp_rect *) b;
   if (p->h != q->h)
      return (p->h > q->h) ? -1 : 1;
   if (p->w != q->w)
      return (p->w > q->w) ? -1 : 1;
   return 0;
}
// qsort comparator restoring the caller's submission order. While packing,
// 'was_packed' temporarily holds each rect's original index (set by
// stbrp_pack_rects), so sorting ascending on it undoes the height sort.
static int STBRP__CDECL rect_original_order(const void *a, const void *b)
{
   const stbrp_rect *p = (const stbrp_rect *) a;
   const stbrp_rect *q = (const stbrp_rect *) b;
   if (p->was_packed < q->was_packed)
      return -1;
   return (p->was_packed > q->was_packed) ? 1 : 0;
}
// Pack 'num_rects' rectangles into the target. Returns 1 iff every rect fit.
// On return each rect has x/y set (STBRP__MAXVAL,STBRP__MAXVAL on failure)
// and was_packed set to 0/1; the array is returned in the caller's original
// order even though packing processes rects tallest-first.
STBRP_DEF int stbrp_pack_rects(stbrp_context *context, stbrp_rect *rects, int num_rects)
{
   int i, all_rects_packed = 1;

   // Stash each rect's original index in 'was_packed' so we can unsort below.
   for (i=0; i < num_rects; ++i)
      rects[i].was_packed = i;

   // Pack tallest rects first -- the skyline heuristics work best this way.
   STBRP_SORT(rects, num_rects, sizeof(rects[0]), rect_height_compare);

   for (i=0; i < num_rects; ++i) {
      if (rects[i].w == 0 || rects[i].h == 0) {
         rects[i].x = rects[i].y = 0;  // degenerate rect occupies no space
         continue;
      }
      {
         stbrp__findresult fr = stbrp__skyline_pack_rectangle(context, rects[i].w, rects[i].h);
         if (fr.prev_link == NULL) {
            rects[i].x = rects[i].y = STBRP__MAXVAL;  // did not fit
         } else {
            rects[i].x = (stbrp_coord) fr.x;
            rects[i].y = (stbrp_coord) fr.y;
         }
      }
   }

   // Restore the caller's original ordering.
   STBRP_SORT(rects, num_rects, sizeof(rects[0]), rect_original_order);

   // Convert 'was_packed' from a stashed index back into a success flag.
   for (i=0; i < num_rects; ++i) {
      rects[i].was_packed = !(rects[i].x == STBRP__MAXVAL && rects[i].y == STBRP__MAXVAL);
      if (!rects[i].was_packed)
         all_rects_packed = 0;
   }

   // return the all_rects_packed status
   return all_rects_packed;
}
#endif
/*
------------------------------------------------------------------------------
This software is available under 2 licenses -- choose whichever you prefer.
------------------------------------------------------------------------------
ALTERNATIVE A - MIT License
Copyright (c) 2017 Sean Barrett
Permission is hereby granted, free of charge, to any person obtaining a copy of
this software and associated documentation files (the "Software"), to deal in
the Software without restriction, including without limitation the rights to
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
of the Software, and to permit persons to whom the Software is furnished to do
so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
------------------------------------------------------------------------------
ALTERNATIVE B - Public Domain (www.unlicense.org)
This is free and unencumbered software released into the public domain.
Anyone is free to copy, modify, publish, use, compile, sell, or distribute this
software, either in source code form or as a compiled binary, for any purpose,
commercial or non-commercial, and by any means.
In jurisdictions that recognize copyright laws, the author or authors of this
software dedicate any and all copyright interest in the software to the public
domain. We make this dedication for the benefit of the public at large and to
the detriment of our heirs and successors. We intend this dedication to be an
overt act of relinquishment in perpetuity of all present and future rights to
this software under copyright law.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
------------------------------------------------------------------------------
*/

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

323
lib/third_party/sort_r.h vendored Normal file
View File

@ -0,0 +1,323 @@
/* Isaac Turner 29 April 2014 Public Domain */
// Source: https://github.com/noporpoise/sort_r/
#ifndef SORT_R_H_
#define SORT_R_H_
#include <stdlib.h> /* qsort_r(), qsort_s() */
#include <string.h> /* needed for memcpy() */
/*
sort_r function to be exported.
Parameters:
base is the array to be sorted
nel is the number of elements in the array
width is the size in bytes of each element of the array
compar is the comparison function
arg is a pointer to be passed to the comparison function
void sort_r(void *base, size_t nel, size_t width,
int (*compar)(const void *_a, const void *_b, void *_arg),
void *arg);
*/
#define _SORT_R_INLINE inline
#if (defined __APPLE__ || defined __MACH__ || defined __DARWIN__ || \
(defined __FreeBSD__ && !defined(qsort_r)) || defined __DragonFly__)
# define _SORT_R_BSD
#elif (defined __GLIBC__ || (defined (__FreeBSD__) && defined(qsort_r)))
# define _SORT_R_LINUX
#elif (defined _WIN32 || defined _WIN64 || defined __WINDOWS__ || \
defined __MINGW32__ || defined __MINGW64__)
# define _SORT_R_WINDOWS
# undef _SORT_R_INLINE
# define _SORT_R_INLINE __inline
#else
/* Using our own recursive quicksort sort_r_simple() */
#endif
#if (defined NESTED_QSORT && NESTED_QSORT == 0)
# undef NESTED_QSORT
#endif
#define SORT_R_SWAP(a,b,tmp) ((tmp) = (a), (a) = (b), (b) = (tmp))
/* Exchange the w-byte regions at a and b, one byte at a time.
   The regions must not overlap (enforced by __restrict). */
static _SORT_R_INLINE void sort_r_swap(char *__restrict a, char *__restrict b,
                                       size_t w)
{
  char *stop = a + w;
  while(a < stop) {
    char t = *a;
    *a++ = *b;
    *b++ = t;
  }
}
/* Compare the w-byte elements at a and b with compar(.., arg) and swap them
   if a > b. Returns 1 when a swap occurred, 0 otherwise.
   a and b must not be equal (or overlap).
   __restrict is same as restrict but better support on old machines */
static _SORT_R_INLINE int sort_r_cmpswap(char *__restrict a,
                                         char *__restrict b, size_t w,
                                         int (*compar)(const void *_a,
                                                       const void *_b,
                                                       void *_arg),
                                         void *arg)
{
  if(compar(a, b, arg) <= 0) return 0;  /* already in order */
  sort_r_swap(a, b, w);
  return 1;
}
/*
  Swap consecutive blocks of bytes of size na and nb starting at memory addr ptr,
  with the smallest swap so that the blocks are in the opposite order. Blocks may
  be internally re-ordered e.g.
    12345ab  ->   ab34512
    123abc   ->   abc123
    12abcde  ->   deabc12
*/
static _SORT_R_INLINE void sort_r_swap_blocks(char *ptr, size_t na, size_t nb)
{
  if(na == 0 || nb == 0) return;  /* nothing to reorder */
  /* Swap min(na,nb) bytes between the start and the tail of the span. */
  if(na > nb) { sort_r_swap(ptr, ptr+na, nb); return; }
  sort_r_swap(ptr, ptr+nb, na);
}
/* Implement recursive quicksort ourselves */
/* Note: quicksort is not stable, equivalent values may be swapped */
/* Sorts nel elements of 'w' bytes each at 'base' using compar(a, b, arg).
   This is the portable fallback used when no native reentrant qsort exists.
   Strategy: insertion sort for tiny inputs, otherwise a 3-way-partition
   quicksort (equal-to-pivot items collected at both edges, then rotated
   into the middle) with median-of-three pivot selection. */
static _SORT_R_INLINE void sort_r_simple(void *base, size_t nel, size_t w,
                                         int (*compar)(const void *_a,
                                                       const void *_b,
                                                       void *_arg),
                                         void *arg)
{
  char *b = (char *)base, *end = b + nel*w;
  /* for(size_t i=0; i<nel; i++) {printf("%4i", *(int*)(b + i*sizeof(int)));}
  printf("\n"); */
  if(nel < 10) {
    /* Insertion sort for arbitrarily small inputs */
    char *pi, *pj;
    for(pi = b+w; pi < end; pi += w) {
      for(pj = pi; pj > b && sort_r_cmpswap(pj-w,pj,w,compar,arg); pj -= w) {}
    }
  }
  else
  {
    /* nel >= 10; Quicksort */
    int cmp;
    char *pl, *ple, *pr, *pre, *pivot;
    char *last = b+w*(nel-1), *tmp;
    /*
    Use median of second, middle and second-last items as pivot.
    First and last may have been swapped with pivot and therefore be extreme
    */
    char *l[3];
    l[0] = b + w;
    l[1] = b+w*(nel/2);
    l[2] = last - w;
    /* printf("pivots: %i, %i, %i\n", *(int*)l[0], *(int*)l[1], *(int*)l[2]); */
    if(compar(l[0],l[1],arg) > 0) { SORT_R_SWAP(l[0], l[1], tmp); }
    if(compar(l[1],l[2],arg) > 0) {
      SORT_R_SWAP(l[1], l[2], tmp);
      if(compar(l[0],l[1],arg) > 0) { SORT_R_SWAP(l[0], l[1], tmp); }
    }
    /* swap mid value (l[1]), and last element to put pivot as last element */
    if(l[1] != last) { sort_r_swap(l[1], last, w); }
    /*
    pl is the next item on the left to be compared to the pivot
    pr is the last item on the right that was compared to the pivot
    ple is the left position to put the next item that equals the pivot
    pre is the last right position where we put an item that equals the pivot
                                           v- end (beyond the array)
      EEEEEELLLLLLLLuuuuuuuuGGGGGGGEEEEEEEE.
      ^- b  ^- ple  ^- pl   ^- pr  ^- pre ^- last (where the pivot is)
    Pivot comparison key:
      E = equal, L = less than, u = unknown, G = greater than, E = equal
    */
    pivot = last;
    ple = pl = b;
    pre = pr = last;
    /*
    Strategy:
    Loop into the list from the left and right at the same time to find:
    - an item on the left that is greater than the pivot
    - an item on the right that is less than the pivot
    Once found, they are swapped and the loop continues.
    Meanwhile items that are equal to the pivot are moved to the edges of the
    array.
    */
    while(pl < pr) {
      /* Move left hand items which are equal to the pivot to the far left.
         break when we find an item that is greater than the pivot */
      for(; pl < pr; pl += w) {
        cmp = compar(pl, pivot, arg);
        if(cmp > 0) { break; }
        else if(cmp == 0) {
          if(ple < pl) { sort_r_swap(ple, pl, w); }
          ple += w;
        }
      }
      /* break if last batch of left hand items were equal to pivot */
      if(pl >= pr) { break; }
      /* Move right hand items which are equal to the pivot to the far right.
         break when we find an item that is less than the pivot */
      for(; pl < pr; ) {
        pr -= w; /* Move right pointer onto an unprocessed item */
        cmp = compar(pr, pivot, arg);
        if(cmp == 0) {
          pre -= w;
          if(pr < pre) { sort_r_swap(pr, pre, w); }
        }
        else if(cmp < 0) {
          if(pl < pr) { sort_r_swap(pl, pr, w); }
          pl += w;
          break;
        }
      }
    }
    pl = pr; /* pr may have gone below pl */
    /*
    Now we need to go from: EEELLLGGGGEEEE
                        to: LLLEEEEEEEGGGG
    Pivot comparison key:
      E = equal, L = less than, u = unknown, G = greater than, E = equal
    */
    sort_r_swap_blocks(b, ple-b, pl-ple);
    sort_r_swap_blocks(pr, pre-pr, end-pre);
    /*for(size_t i=0; i<nel; i++) {printf("%4i", *(int*)(b + i*sizeof(int)));}
    printf("\n");*/
    /* Recurse into the strictly-less and strictly-greater partitions;
       items equal to the pivot are already in final position. */
    sort_r_simple(b, (pl-ple)/w, w, compar, arg);
    sort_r_simple(end-(pre-pr), (pre-pr)/w, w, compar, arg);
  }
}
#if defined NESTED_QSORT
/* NESTED_QSORT path: wrap plain qsort() with a nested comparator that
   captures 'compar'/'arg'. NOTE(review): nested functions are a GCC C
   extension -- this arm is non-portable (no MSVC/clang) and only compiles
   when the user explicitly defines NESTED_QSORT. */
static _SORT_R_INLINE void sort_r(void *base, size_t nel, size_t width,
                                  int (*compar)(const void *_a,
                                                const void *_b,
                                                void *aarg),
                                  void *arg)
{
  int nested_cmp(const void *a, const void *b)
  {
    return compar(a, b, arg);
  }
  qsort(base, nel, width, nested_cmp);
}
#else /* !NESTED_QSORT */
/* Declare structs and functions */
#if defined _SORT_R_BSD
/* Ensure qsort_r is defined */
extern void qsort_r(void *base, size_t nel, size_t width, void *thunk,
int (*compar)(void *_thunk,
const void *_a, const void *_b));
#endif
#if defined _SORT_R_BSD || defined _SORT_R_WINDOWS
/* BSD (qsort_r), Windows (qsort_s) require argument swap */
/* BSD qsort_r and Windows qsort_s pass the user context in a different
   argument position than the portable (a, b, arg) comparator, so the
   comparator and its argument are bundled into this struct and unpacked by
   the thunk below. */
struct sort_r_data
{
  void *arg;                                              /* user context */
  int (*compar)(const void *_a, const void *_b, void *_arg); /* user comparator */
};
/* Adapter with the BSD/Windows callback signature (context first):
   unpacks sort_r_data and forwards to the user's (a, b, arg) comparator. */
static _SORT_R_INLINE int sort_r_arg_swap(void *s,
                                          const void *a, const void *b)
{
  struct sort_r_data *ss = (struct sort_r_data*)s;
  return (ss->compar)(a, b, ss->arg);
}
#endif
#if defined _SORT_R_LINUX
typedef int(* __compar_d_fn_t)(const void *, const void *, void *);
extern void (qsort_r)(void *base, size_t nel, size_t width,
__compar_d_fn_t __compar, void *arg)
__attribute__((nonnull (1, 4)));
#endif
/* implementation */
/* Reentrant sort: dispatches to the platform's native qsort_r/qsort_s when
   one was detected above, otherwise falls back to sort_r_simple(). All arms
   have identical observable behavior: base[0..nel) sorted by compar(.., arg). */
static _SORT_R_INLINE void sort_r(void *base, size_t nel, size_t width,
                                  int (*compar)(const void *_a,
                                                const void *_b, void *_arg),
                                  void *arg)
{
  #if defined _SORT_R_LINUX
    #if defined __GLIBC__ && ((__GLIBC__ < 2) || (__GLIBC__ == 2 && __GLIBC_MINOR__ < 8))
      /* no qsort_r in glibc before 2.8, need to use nested qsort */
      sort_r_simple(base, nel, width, compar, arg);
    #else
      /* GNU qsort_r: same argument order as ours, call straight through */
      qsort_r(base, nel, width, compar, arg);
    #endif
  #elif defined _SORT_R_BSD
    /* BSD qsort_r: context precedes the comparator -> use the swap thunk */
    struct sort_r_data tmp;
    tmp.arg = arg;
    tmp.compar = compar;
    qsort_r(base, nel, width, &tmp, sort_r_arg_swap);
  #elif defined _SORT_R_WINDOWS
    /* MSVC qsort_s: context is the comparator's first parameter -> thunk */
    struct sort_r_data tmp;
    tmp.arg = arg;
    tmp.compar = compar;
    qsort_s(base, nel, width, sort_r_arg_swap, &tmp);
  #else
    /* Fall back to our own quicksort implementation */
    sort_r_simple(base, nel, width, compar, arg);
  #endif
}
#endif /* !NESTED_QSORT */
#undef _SORT_R_INLINE
#undef _SORT_R_WINDOWS
#undef _SORT_R_LINUX
#undef _SORT_R_BSD
#endif /* SORT_R_H_ */

71
lib_main.cpp Normal file
View File

@ -0,0 +1,71 @@
// ~Musa - This is a unity build, where all source files in the project is combined into a single
// translation unit.
// lib_main.cpp can be treated as a single-header library and added to a project like that.
// #TODO: #Library This is quite disorganized. There must be a better way to do this by moving the
// typedefs and procedures that require forward declaration to the top with a metaprogram.
// [ ] Linux / MacOS Ports
// Not implemented as distinct types: singly-linked list, doubly-linked list
// I'll see where it's used most often and see if I can make macros or templates to make
// them easier to use.
// Third party single-header libs:
#include "lib/third_party/sort_r.h"
// #include "lib/meta_generated.h"
#include "lib/Base/Base.h"
#include "lib/Base/Allocator.h"
#include "lib/Base/Array.h"
#include "lib/Base/String.h"
#include "lib/Base/General_Purpose_Allocator.h"
#include "lib/Base/Arena.h"
#include "lib/Base/Arena_Array.h"
#include "lib/Base/String_Builder.h"
#include "lib/Base/Hash_Functions.h"
#include "lib/Base/Hash_Table.h"
#include "lib/Base/Arena_Hash_Table.h"
#if OS_WINDOWS
# include "lib/OS/OS_Win32.h"
#endif
#include "lib/UI/Layout.h"
#include "lib/Base/Logger.h"
#include "lib/Base/Expandable_Arena.h"
#include "lib/Base/Base_Thread_Context.h"
#include "lib/Base/Serializer.h"
#include "lib/Base/Bit_Array.h"
#include "lib/Base/Timing.h"
// #include "lib/Base/Arena_Free_List.cpp"
#include "lib/Base/Arena.cpp"
#include "lib/Base/String.cpp"
#include "lib/Base/Unicode.cpp"
#include "lib/Base/String_Matching.cpp"
#include "lib/Base/RadixSort.cpp"
#include "lib/Base/Base_Thread_Context.cpp"
#include "lib/Base/ErrorType.cpp"
#include "lib/Base/Logger.cpp"
#include "lib/Base/Expandable_Arena.cpp"
#include "lib/Base/Allocator.cpp"
#include "lib/Base/General_Purpose_Allocator.cpp"
// OS-Abstraction Layer
#include "lib/Base/Threads.cpp"
#include "lib/Base/Thread_Group.cpp"
#if OS_WINDOWS
# include "lib/OS/OS_Win32.cpp"
#endif
// #include "lib/Base/File_Search.h"
#include "lib/Graphics.cpp"
// #if OS_LINUX..
// #include "src/OS_Linux.cpp"
// #if OS_MACOS..
// #include "src/OS_MacOS.cpp"

60
src/Base_Entry_Point.cpp Normal file
View File

@ -0,0 +1,60 @@
internal void Main_Entry_Point (int argc, WCHAR **argv);
#if OS_WINDOWS
#if BUILD_CONSOLE_INTERFACE
// Console-build entry point: forwards the wide-char command line straight to
// the shared Main_Entry_Point and reports success to the OS.
int wmain(int argc, WCHAR **argv) {
    Main_Entry_Point(argc, argv);
    return 0;
}
#else
#pragma comment(linker, "/SUBSYSTEM:WINDOWS")
#include <cstdlib> // globals __argc, __wargv
// GUI-build entry point (no console window). lpCmdLine arrives unparsed, so
// the already-tokenized command line is taken from the CRT globals
// __argc/__wargv instead; hInstance/hPrevInstance/nShowCmd are unused.
int wWinMain(HINSTANCE hInstance, HINSTANCE hPrevInstance, LPWSTR lpCmdLine, int nShowCmd) {
    Main_Entry_Point(__argc, __wargv);
    return 0;
}
#endif
#endif
// Platform-independent entry point shared by wmain/wWinMain: sets up timing
// and the main-thread context, runs optional self-tests, then hands off to
// the platform/UI entry point. Order matters -- see notes inline.
internal void Main_Entry_Point (int argc, WCHAR **argv) { // #entry_point: Main + Context Setup
    // #TODO: Check if base frequency is even available.
    u32 base_frequency = (u32)CPU_Base_Frequency();
    set_cpu_base_frequency(base_frequency); // REQUIRED FOR TIMING MODULE! will depend on CPU
#if BASE_RUN_TESTS
    run_pre_setup_tests(); // #no_context: context will not be initialized at this point.
#endif
    // #NOTE: Be careful using a timing or auto-release macros
    // before setting up the thread context!
    Bootstrap_Main_Thread_Context();
    stack_trace(); // #stack_trace: #entry_point (first stack trace)
#if OS_WINDOWS
    // Platform-specific init (window class, tray icon, etc. -- see OS_Win32.cpp).
    Win32_Entry_Point(argc, argv);
#endif
#if BASE_RUN_TESTS
    // Second test pass, now with a live thread context.
    run_post_setup_tests();
#endif
#if BUILD_WIN32_APPLICATION // #entry_point: Custom program
    // This is deliberately verbose because it is currently platform-specific (win32),
    // and we may eventually replace Dear ImGui as the front-end UI framework
    // Eventually we will have a unified cross-platform entry point.
    ImGui_Win32_Application_Entry_Point();
#endif
#if BUILD_CUSTOM_GUI
    // Custom GUI based on Vik's prototype [WIP]
    /* Alternative if we're using OpenGL.
    graphics_set_render_target(get_main_window());
    // 3. Init Graphics (DX11 or OpenGL3)
    // #TODO: #Graphics#Main - `Main_Entry_Point`
    // 4. [ ] Setup Mouse and Keyboard Inputs
    // 5. [ ] Launch second thread; thread groups
    */
#endif
}

112
src/DX11.cpp Normal file
View File

@ -0,0 +1,112 @@
// GPU-side texture handle for ImGui rendering. After a successful
// icon_load_d3d_texture(), only 'out_srv' holds a live COM reference
// ('texture' is released immediately after the SRV is created).
struct D3D11_Texture {
    ID3D11Texture2D* texture;            // transient; null after upload completes
    ID3D11ShaderResourceView* out_srv;   // view ImGui draws with (owning ref)
    s32 out_width;                       // texture dimensions in pixels
    s32 out_height;
    bool loaded = false;                 // true once out_srv is valid
};
// Maybe should be part of ImGui_Supplementary?
// CPU-side image plus its uploaded GPU counterpart.
struct Image_Data {
    u8* data;                 // raw pixel bytes; assumed 4 bytes/pixel BGRA by icon_load_d3d_texture -- TODO confirm
    s32 width;                // dimensions in pixels
    s32 height;
    s32 depth;                // presumably bytes (or channels) per pixel -- verify against loader
    f32 scale_factor = 1.0;   // display scale applied when drawing
    D3D11_Texture d3d_texture; // GPU copy (see icon_load_d3d_texture)
};
// #NOTE: redefined from imgui_impl_dx11.cpp (not supposed to do that?)
// WARNING: this must stay byte-for-byte layout-compatible with the private
// struct in imgui_impl_dx11.cpp -- any Dear ImGui upgrade can silently break
// it. Only 'pd3dDevice' is actually read here (see icon_load_d3d_texture).
struct ImGui_ImplDX11_Data
{
    ID3D11Device*               pd3dDevice;
    ID3D11DeviceContext*        pd3dDeviceContext;
    IDXGIFactory*               pFactory;
    ID3D11Buffer*               pVB;
    ID3D11Buffer*               pIB;
    ID3D11VertexShader*         pVertexShader;
    ID3D11InputLayout*          pInputLayout;
    ID3D11Buffer*               pVertexConstantBuffer;
    ID3D11PixelShader*          pPixelShader;
    ID3D11SamplerState*         pTexSamplerLinear;
    ID3D11RasterizerState*      pRasterizerState;
    ID3D11BlendState*           pBlendState;
    ID3D11DepthStencilState*    pDepthStencilState;
    int                         VertexBufferSize;
    int                         IndexBufferSize;
    ImVector<DXGI_SWAP_CHAIN_DESC> SwapChainDescsForViewports;
    ImGui_ImplDX11_Data() { memset((void*)this, 0, sizeof(*this)); VertexBufferSize = 5000; IndexBufferSize = 10000; }
};
// Also copy-pasted from imgui_impl_dx11.cpp: fetch the DX11 backend state
// stashed in the ImGui IO block, or null if no ImGui context exists yet.
static ImGui_ImplDX11_Data* ImGui_ImplDX11_GetBackendData()
{
    if (ImGui::GetCurrentContext() == nullptr)
        return nullptr;
    return (ImGui_ImplDX11_Data*)ImGui::GetIO().BackendRendererUserData;
}
// Upload the CPU-side pixels in 'id' to a GPU texture and create the shader
// resource view ImGui draws with. On success fills id->d3d_texture
// (out_srv/out_width/out_height, loaded=true) and releases the intermediate
// ID3D11Texture2D (the SRV keeps the underlying resource alive). On failure,
// logs, releases anything created so far, and leaves loaded == false.
void icon_load_d3d_texture (Image_Data* id) {
    ImGui_ImplDX11_Data* bd = ImGui_ImplDX11_GetBackendData();
    auto d3d_device = bd->pd3dDevice;
    D3D11_TEXTURE2D_DESC desc = {};
    desc.Width = id->width;
    desc.Height = id->height;
    desc.MipLevels = 1;
    desc.ArraySize = 1;
    desc.Format = DXGI_FORMAT_B8G8R8A8_UNORM; // assumes id->data is 4-byte BGRA -- TODO confirm against the image loader
    desc.SampleDesc.Count = 1;
    desc.Usage = D3D11_USAGE_DEFAULT;
    desc.BindFlags = D3D11_BIND_SHADER_RESOURCE; // D3D11_BIND_FLAG
    desc.CPUAccessFlags = 0;
    D3D11_SUBRESOURCE_DATA sr = {};
    sr.pSysMem = id->data;
    sr.SysMemPitch = (desc.Width * 4); // tightly packed rows, 4 bytes per pixel
    sr.SysMemSlicePitch = 0;
    HRESULT hr = d3d_device->CreateTexture2D(&desc, &sr, &id->d3d_texture.texture);
    if (hr != S_OK) {
        // BUGFIX: message previously said "ID3D11Device_CreateBuffer".
        log_error("ID3D11Device_CreateTexture2D failed: %d\n", hr);
        os_log_error();
        return;
    }
    D3D11_SHADER_RESOURCE_VIEW_DESC srvDesc = {};
    srvDesc.Format = desc.Format;
    srvDesc.ViewDimension = D3D11_SRV_DIMENSION_TEXTURE2D;
    srvDesc.Texture2D.MipLevels = desc.MipLevels;
    srvDesc.Texture2D.MostDetailedMip = 0;
    hr = d3d_device->CreateShaderResourceView(id->d3d_texture.texture,
                                              &srvDesc, &id->d3d_texture.out_srv);
    if (hr != S_OK) {
        log_error("ID3D11Device_CreateShaderResourceView failed: %d\n", hr);
        os_log_error();
        // BUGFIX: don't leak the texture created above when SRV creation fails.
        id->d3d_texture.texture->Release();
        id->d3d_texture.texture = nullptr;
        return;
    }
    // The SRV holds its own reference to the resource; drop ours.
    id->d3d_texture.texture->Release();
    id->d3d_texture.texture = nullptr;
    id->d3d_texture.out_width = id->width;
    id->d3d_texture.out_height = id->height;
    id->d3d_texture.loaded = true;
}
// Release the GPU resources owned by 't'. Safe to call repeatedly: does
// nothing when the texture was never loaded or was already deleted.
void delete_d3d_texture (D3D11_Texture* t) {
    if (!t->loaded) return;
    t->out_srv->Release();
    t->out_srv = nullptr; // BUGFIX: don't leave a dangling pointer behind
    t->loaded = false;
}
#define Get_ImTextureID(x) (ImTextureID)(x->out_srv)
// Draw 't' through ImGui scaled to 'new_height', preserving aspect ratio.
// No-op when the texture isn't loaded. BUGFIX: the scale factor is now only
// computed for loaded textures -- previously it divided by out_height before
// checking 'loaded', reading an uninitialized (possibly zero) value.
void show_d3d_texture_height (D3D11_Texture* t, f32 new_height) {
    if (!t->loaded) return;
    f32 new_scale_factor = new_height / t->out_height;
    f32 new_width = t->out_width * new_scale_factor;
    ImGui::Image(Get_ImTextureID(t), {new_width, new_height}, {0, 0},
                 {1, 1}, {1, 1, 1, 1}, {0, 0, 0, 0});
}

633
src/ImGui_Supplementary.cpp Normal file
View File

@ -0,0 +1,633 @@
// Data -- D3D11 device / swap-chain state shared by this translation unit.
// NOTE(review): single global device state means one render target for the
// whole process (see the WM_CREATE comment in ImGui_WndProc).
static ID3D11Device*            g_pd3dDevice = nullptr;
static ID3D11DeviceContext*     g_pd3dDeviceContext = nullptr;
static IDXGISwapChain*          g_pSwapChain = nullptr;
static bool                     g_SwapChainOccluded = false;  // set when Present reports occlusion
static UINT                     g_ResizeWidth = 0, g_ResizeHeight = 0; // pending resize queued by WM_SIZE
static ID3D11RenderTargetView*  g_mainRenderTargetView = nullptr;
// Forward declarations of helper functions
bool CreateDeviceD3D(HWND hWnd);
void CleanupDeviceD3D();
void CreateRenderTarget();
void CleanupRenderTarget();
LRESULT WINAPI ImGui_WndProc(HWND hWnd, UINT msg, WPARAM wParam, LPARAM lParam);
// Helper functions
// Create the D3D11 device, immediate context and swap chain for 'hWnd',
// falling back to the WARP software rasterizer when no hardware device is
// available. Returns false on failure (caller should clean up and bail).
bool CreateDeviceD3D(HWND hWnd) {
    // Setup swap chain
    // This is a basic setup. Optimally could use e.g. DXGI_SWAP_EFFECT_FLIP_DISCARD and handle fullscreen mode differently. See #8979 for suggestions.
    DXGI_SWAP_CHAIN_DESC sd;
    ZeroMemory(&sd, sizeof(sd));
    sd.BufferCount = 2;
    sd.BufferDesc.Width = 0;   // 0 = use the window's current client size
    sd.BufferDesc.Height = 0;
    sd.BufferDesc.Format = DXGI_FORMAT_R8G8B8A8_UNORM;
    sd.BufferDesc.RefreshRate.Numerator = 60;
    sd.BufferDesc.RefreshRate.Denominator = 1;
    sd.Flags = DXGI_SWAP_CHAIN_FLAG_ALLOW_MODE_SWITCH;
    sd.BufferUsage = DXGI_USAGE_RENDER_TARGET_OUTPUT;
    sd.OutputWindow = hWnd;
    sd.SampleDesc.Count = 1;
    sd.SampleDesc.Quality = 0;
    sd.Windowed = TRUE;
    sd.SwapEffect = DXGI_SWAP_EFFECT_DISCARD;
    UINT createDeviceFlags = 0;
    //createDeviceFlags |= D3D11_CREATE_DEVICE_DEBUG;
    D3D_FEATURE_LEVEL featureLevel;
    const D3D_FEATURE_LEVEL featureLevelArray[2] = { D3D_FEATURE_LEVEL_11_0, D3D_FEATURE_LEVEL_10_0, };
    HRESULT res = D3D11CreateDeviceAndSwapChain(nullptr, D3D_DRIVER_TYPE_HARDWARE, nullptr, createDeviceFlags, featureLevelArray, 2, D3D11_SDK_VERSION, &sd, &g_pSwapChain, &g_pd3dDevice, &featureLevel, &g_pd3dDeviceContext);
    if (res == DXGI_ERROR_UNSUPPORTED) // Try high-performance WARP software driver if hardware is not available.
        res = D3D11CreateDeviceAndSwapChain(nullptr, D3D_DRIVER_TYPE_WARP, nullptr, createDeviceFlags, featureLevelArray, 2, D3D11_SDK_VERSION, &sd, &g_pSwapChain, &g_pd3dDevice, &featureLevel, &g_pd3dDeviceContext);
    if (res != S_OK)
        return false;
    // Disable DXGI's default Alt+Enter fullscreen behavior.
    // - You are free to leave this enabled, but it will not work properly with multiple viewports.
    // - This must be done for all windows associated to the device. Our DX11 backend does this automatically for secondary viewports that it creates.
    IDXGIFactory* pSwapChainFactory;
    if (SUCCEEDED(g_pSwapChain->GetParent(IID_PPV_ARGS(&pSwapChainFactory)))) {
        pSwapChainFactory->MakeWindowAssociation(hWnd, DXGI_MWA_NO_ALT_ENTER);
        pSwapChainFactory->Release();
    }
    CreateRenderTarget();
    return true;
}
// Tear down the render target and the core D3D11 objects, in reverse order
// of creation. Safe to call when only partially (or not at all) initialized.
void CleanupDeviceD3D() {
    CleanupRenderTarget();
    if (g_pSwapChain) {
        g_pSwapChain->Release();
        g_pSwapChain = nullptr;
    }
    if (g_pd3dDeviceContext) {
        g_pd3dDeviceContext->Release();
        g_pd3dDeviceContext = nullptr;
    }
    if (g_pd3dDevice) {
        g_pd3dDevice->Release();
        g_pd3dDevice = nullptr;
    }
}
void CreateRenderTarget() {
ID3D11Texture2D* pBackBuffer;
g_pSwapChain->GetBuffer(0, IID_PPV_ARGS(&pBackBuffer));
g_pd3dDevice->CreateRenderTargetView(pBackBuffer, nullptr, &g_mainRenderTargetView);
pBackBuffer->Release();
}
// Drop the back-buffer view (must be done before resizing the swap chain).
void CleanupRenderTarget() {
    if (!g_mainRenderTargetView) return;
    g_mainRenderTargetView->Release();
    g_mainRenderTargetView = nullptr;
}
// Forward declare message handler from imgui_impl_win32.cpp
extern IMGUI_IMPL_API LRESULT ImGui_ImplWin32_WndProcHandler(HWND hWnd, UINT msg, WPARAM wParam, LPARAM lParam);
// Win32 message handler
// You can read the io.WantCaptureMouse, io.WantCaptureKeyboard flags to tell if dear imgui wants to use your inputs.
// - When io.WantCaptureMouse is true, do not dispatch mouse input data to your main application, or clear/overwrite your copy of the mouse data.
// - When io.WantCaptureKeyboard is true, do not dispatch keyboard input data to your main application, or clear/overwrite your copy of the keyboard data.
// Generally you may always pass all inputs to dear imgui, and hide them from your application based on those two flags.
// Handles: deferred resize (WM_SIZE), close/minimize-to-tray interception
// (WM_SYSCOMMAND), and the custom tray-icon message (WM_TRAYICON).
LRESULT WINAPI ImGui_WndProc(HWND hWnd, UINT msg, WPARAM wParam, LPARAM lParam) {
    // Give ImGui first refusal on every message.
    if (ImGui_ImplWin32_WndProcHandler(hWnd, msg, wParam, lParam))
        return true;
    switch (msg) {
    case WM_SIZE: {
        if (wParam == SIZE_MINIMIZED)
            return 0;
        // Don't resize here -- just record the new size; the render loop
        // applies it between frames.
        g_ResizeWidth = (UINT)LOWORD(lParam); // Queue resize
        g_ResizeHeight = (UINT)HIWORD(lParam);
    } return 0;
    case WM_SYSCOMMAND: {
        // Per Win32 docs the low 4 bits of wParam are reserved -- mask them off.
        if ((wParam & 0xfff0) == SC_CLOSE) {
            ::PostQuitMessage(0);
            return 0;
        }
        if ((wParam & 0xfff0) == SC_MINIMIZE) {
            // Minimize hides to the notification area instead of the taskbar.
            Win32_Minimize_Window_To_Tray(get_window_info(hWnd));
            return 0;
        }
        if ((wParam & 0xfff0) == SC_KEYMENU) // Disable ALT application menu
            return 0;
    } break;
    case WM_CREATE: {
        // This is pretty dumb to have a single one for global state! See if we can attach it to Window_Info
    } return 0;
    case WM_TRAYICON: {
        // Most applications perform actions on button up (after click), but we do
        // on button down to make it feel as responsive as possible.
        if (lParam == WM_LBUTTONDOWN) {
            Win32_Bring_Window_To_Foreground(get_main_window_pointer());
        }
        if (lParam == WM_RBUTTONDOWN) {
            // Show the tray context menu at the cursor; TPM_RETURNCMD makes
            // TrackPopupMenu return the chosen item instead of posting it.
            POINT curPos; GetCursorPos(&curPos);
            SetForegroundWindow(hWnd);
            Window_Info* info = get_window_info(hWnd);
            if (!info) return 0;
            u32 clicked = TrackPopupMenu(info->tray_icon_menu, TPM_RETURNCMD | TPM_NONOTIFY, curPos.x, curPos.y, 0, hWnd, nullptr);
            if (clicked == ICON_CONTEXT_MENU_ITEM_ID) {
                log("(WM_TRAYICON:Exit) Quitting application...");
                ::PostQuitMessage(0);
            }
        }
    } return 0;
    case WM_DESTROY: {
        ::PostQuitMessage(0);
    } return 0;
    } // switch (msg)
    return ::DefWindowProcW(hWnd, msg, wParam, lParam);
}
// ~ Keyboard Input
// A hotkey definition plus its per-frame trigger state; evaluated each frame
// by check_key_combination().
struct Key_Combination {
    ImGuiKey key = ImGuiKey_None;  // main (non-modifier) key
    bool ctrl;                     // required modifiers; combination only
    bool shift;                    //   fires when the held modifiers match EXACTLY
    bool alt;
    bool once = true;              // true: edge-triggered (fires once per press); false: fires every frame while held
    bool triggered;                // output: did it fire this frame?
    f64 last_key_down_time;        // timestamp of the triggering press; 0 when released (used for edge detection)
};
// Per-frame snapshot of modifier state (see update_global_keyboard_state).
struct KB_State {
    f64 current_frame_time;        // timestamp recorded at snapshot time
    b32 ctrl_key_down;
    b32 shift_key_down;
    b32 alt_key_down;
};
global KB_State global_keyboard_state;
// Reject a combination check: currently always reports "not triggered".
// Kept as a function (rather than returning false inline) presumably so
// failure handling/logging can later be added in one place; 'kc' is unused
// for now. -- NOTE(review): confirm intent before removing the parameter.
force_inline bool key_combination_failed(Key_Combination* kc) {
    return false;
}
// Live modifier queries: true while either the left or right variant of the
// modifier is held (per ImGui's key state this frame).
bool ctrl_key_down () {
    return ImGui::IsKeyDown(ImGuiKey_LeftCtrl) || ImGui::IsKeyDown(ImGuiKey_RightCtrl);
}
bool shift_key_down () {
    return ImGui::IsKeyDown(ImGuiKey_LeftShift) || ImGui::IsKeyDown(ImGuiKey_RightShift);
}
bool alt_key_down () {
    return ImGui::IsKeyDown(ImGuiKey_LeftAlt) || ImGui::IsKeyDown(ImGuiKey_RightAlt);
}
// Evaluates one key combination for the current frame, stores the result in
// kc->triggered and returns it. Modifier state must match *exactly*: required
// modifiers must be held and unrequested modifiers must be up.
// For once==true combos, last_key_down_time acts as a latch: the combo fires
// on the frame the key goes down and cannot fire again until the key is
// released (which clears the latch below).
bool check_key_combination (Key_Combination* kc) {
    kc->triggered = false; // reset
    // Key released -> clear the latch so a "once" combo can fire again.
    if (kc->last_key_down_time && ImGui::IsKeyReleased(kc->key)) {
        kc->last_key_down_time = 0.0;
    }
    // Exact modifier match: bail if a required modifier is up, or an
    // unrequested one is down.
    if ( kc->ctrl && !ctrl_key_down()) return key_combination_failed(kc);
    if (!kc->ctrl && ctrl_key_down()) return key_combination_failed(kc);
    if ( kc->shift && !shift_key_down()) return key_combination_failed(kc);
    if (!kc->shift && shift_key_down()) return key_combination_failed(kc);
    if ( kc->alt && !alt_key_down()) return key_combination_failed(kc);
    if (!kc->alt && alt_key_down()) return key_combination_failed(kc);
    if (kc->once) {
        // Edge trigger: fires only when the key is down and not yet latched.
        kc->triggered = (ImGui::IsKeyDown(kc->key) && kc->last_key_down_time == 0);
        if (kc->triggered) {
            kc->last_key_down_time = global_keyboard_state.current_frame_time;
        }
    } else {
        // Level trigger: fires every frame the key is held.
        kc->triggered = (ImGui::IsKeyDown(kc->key));
    }
    if (kc->triggered) {
#if BUILD_DEBUG
        log("Key triggered: %s%s%s%s | once=%d, ts: %1.3f",
            (kc->ctrl) ? "ctrl+" : "",
            (kc->shift) ? "shift+" : "",
            (kc->alt) ? "alt+" : "",
            ImGui::GetKeyName(kc->key), kc->once, kc->last_key_down_time);
#endif
    }
    return kc->triggered;
}
void update_global_keyboard_state () {
global_keyboard_state.current_frame_time = GetUnixTimestamp();
global_keyboard_state.ctrl_key_down = ctrl_key_down();
global_keyboard_state.shift_key_down = shift_key_down();
global_keyboard_state.alt_key_down = alt_key_down();
}
// ~ ImGui Style
#include "../lib/third_party/dear-imgui/imgui_internal.h"
// Applies the application's theme: a blue-accented dark palette (derived from
// ImGui's default dark style) plus rounding/size tweaks. Call once after
// ImGui::CreateContext(), before any scaling is baked in.
void Set_Custom_Style () {
    ImGuiStyle* style = &ImGui::GetStyle();
    ImVec4* colors = style->Colors;
    // Geometry: rounding and grab-handle sizing.
    style->Alpha = 1.0;
    style->WindowRounding = 3.0f;
    style->GrabRounding = 3.0f;
    style->GrabMinSize = 20.0f;
    style->FrameRounding = 8.0f;
    style->TabRounding = 8.0f;
    // Palette: dark background, blue accent (0.26, 0.59, 0.98).
    colors[ImGuiCol_Text] = ImVec4(1.00f, 1.00f, 1.00f, 1.00f);
    colors[ImGuiCol_TextDisabled] = ImVec4(0.50f, 0.50f, 0.50f, 1.00f);
    colors[ImGuiCol_WindowBg] = ImVec4(0.06f, 0.06f, 0.06f, 0.94f);
    colors[ImGuiCol_ChildBg] = ImVec4(0.00f, 0.00f, 0.00f, 0.00f);
    colors[ImGuiCol_PopupBg] = ImVec4(0.08f, 0.08f, 0.08f, 0.94f);
    colors[ImGuiCol_Border] = ImVec4(0.43f, 0.43f, 0.50f, 0.50f);
    colors[ImGuiCol_BorderShadow] = ImVec4(0.00f, 0.00f, 0.00f, 0.00f);
    colors[ImGuiCol_FrameBg] = ImVec4(0.16f, 0.29f, 0.48f, 0.54f);
    colors[ImGuiCol_FrameBgHovered] = ImVec4(0.26f, 0.59f, 0.98f, 0.40f);
    colors[ImGuiCol_FrameBgActive] = ImVec4(0.26f, 0.59f, 0.98f, 0.67f);
    colors[ImGuiCol_TitleBg] = ImVec4(0.04f, 0.04f, 0.04f, 1.00f);
    colors[ImGuiCol_TitleBgActive] = ImVec4(0.16f, 0.29f, 0.48f, 1.00f);
    colors[ImGuiCol_TitleBgCollapsed] = ImVec4(0.00f, 0.00f, 0.00f, 0.51f);
    colors[ImGuiCol_MenuBarBg] = ImVec4(0.14f, 0.14f, 0.14f, 1.00f);
    colors[ImGuiCol_ScrollbarBg] = ImVec4(0.02f, 0.02f, 0.02f, 0.53f);
    colors[ImGuiCol_ScrollbarGrab] = ImVec4(0.31f, 0.31f, 0.31f, 1.00f);
    colors[ImGuiCol_ScrollbarGrabHovered] = ImVec4(0.41f, 0.41f, 0.41f, 1.00f);
    colors[ImGuiCol_ScrollbarGrabActive] = ImVec4(0.51f, 0.51f, 0.51f, 1.00f);
    colors[ImGuiCol_CheckMark] = ImVec4(0.26f, 0.59f, 0.98f, 1.00f);
    colors[ImGuiCol_SliderGrab] = ImVec4(0.24f, 0.52f, 0.88f, 1.00f);
    colors[ImGuiCol_SliderGrabActive] = ImVec4(0.26f, 0.59f, 0.98f, 1.00f);
    colors[ImGuiCol_Button] = ImVec4(0.26f, 0.59f, 0.98f, 0.40f);
    colors[ImGuiCol_ButtonHovered] = ImVec4(0.26f, 0.59f, 0.98f, 1.00f);
    colors[ImGuiCol_ButtonActive] = ImVec4(0.06f, 0.53f, 0.98f, 1.00f);
    colors[ImGuiCol_Header] = ImVec4(0.26f, 0.59f, 0.98f, 0.31f);
    colors[ImGuiCol_HeaderHovered] = ImVec4(0.26f, 0.59f, 0.98f, 0.80f);
    colors[ImGuiCol_HeaderActive] = ImVec4(0.26f, 0.59f, 0.98f, 1.00f);
    colors[ImGuiCol_Separator] = colors[ImGuiCol_Border];
    colors[ImGuiCol_SeparatorHovered] = ImVec4(0.10f, 0.40f, 0.75f, 0.78f);
    colors[ImGuiCol_SeparatorActive] = ImVec4(0.10f, 0.40f, 0.75f, 1.00f);
    colors[ImGuiCol_ResizeGrip] = ImVec4(0.26f, 0.59f, 0.98f, 0.20f);
    colors[ImGuiCol_ResizeGripHovered] = ImVec4(0.26f, 0.59f, 0.98f, 0.67f);
    colors[ImGuiCol_ResizeGripActive] = ImVec4(0.26f, 0.59f, 0.98f, 0.95f);
    colors[ImGuiCol_InputTextCursor] = colors[ImGuiCol_Text];
    // Tabs are derived from the header/title colors so they stay consistent
    // if the accent color changes.
    colors[ImGuiCol_TabHovered] = colors[ImGuiCol_HeaderHovered];
    colors[ImGuiCol_Tab] = ImLerp(colors[ImGuiCol_Header], colors[ImGuiCol_TitleBgActive], 0.80f);
    colors[ImGuiCol_TabSelected] = ImLerp(colors[ImGuiCol_HeaderActive], colors[ImGuiCol_TitleBgActive], 0.60f);
    colors[ImGuiCol_TabSelectedOverline] = colors[ImGuiCol_HeaderActive];
    colors[ImGuiCol_TabDimmed] = ImLerp(colors[ImGuiCol_Tab], colors[ImGuiCol_TitleBg], 0.80f);
    colors[ImGuiCol_TabDimmedSelected] = ImLerp(colors[ImGuiCol_TabSelected], colors[ImGuiCol_TitleBg], 0.40f);
    colors[ImGuiCol_TabDimmedSelectedOverline] = ImVec4(0.50f, 0.50f, 0.50f, 0.00f);
    colors[ImGuiCol_DockingPreview] = ImVec4(0.26f, 0.59f, 0.98f, 0.75f); //colors[ImGuiCol_HeaderActive] * ImVec4(1.0f, 1.0f, 1.0f, 0.7f);
    colors[ImGuiCol_DockingEmptyBg] = ImVec4(0.20f, 0.20f, 0.20f, 1.00f);
    colors[ImGuiCol_PlotLines] = ImVec4(0.61f, 0.61f, 0.61f, 1.00f);
    colors[ImGuiCol_PlotLinesHovered] = ImVec4(1.00f, 0.43f, 0.35f, 1.00f);
    colors[ImGuiCol_PlotHistogram] = ImVec4(0.90f, 0.70f, 0.00f, 1.00f);
    colors[ImGuiCol_PlotHistogramHovered] = ImVec4(1.00f, 0.60f, 0.00f, 1.00f);
    colors[ImGuiCol_TableHeaderBg] = ImVec4(0.19f, 0.19f, 0.20f, 1.00f);
    colors[ImGuiCol_TableBorderStrong] = ImVec4(0.31f, 0.31f, 0.35f, 1.00f); // Prefer using Alpha=1.0 here
    colors[ImGuiCol_TableBorderLight] = ImVec4(0.23f, 0.23f, 0.25f, 1.00f); // Prefer using Alpha=1.0 here
    colors[ImGuiCol_TableRowBg] = ImVec4(0.00f, 0.00f, 0.00f, 0.00f);
    colors[ImGuiCol_TableRowBgAlt] = ImVec4(1.00f, 1.00f, 1.00f, 0.06f);
    colors[ImGuiCol_TextLink] = colors[ImGuiCol_HeaderActive];
    colors[ImGuiCol_TextSelectedBg] = ImVec4(0.26f, 0.59f, 0.98f, 0.35f);
    colors[ImGuiCol_TreeLines] = colors[ImGuiCol_Border];
    colors[ImGuiCol_DragDropTarget] = ImVec4(1.00f, 1.00f, 0.00f, 0.90f);
    colors[ImGuiCol_DragDropTargetBg] = ImVec4(0.00f, 0.00f, 0.00f, 0.00f);
    colors[ImGuiCol_UnsavedMarker] = ImVec4(1.00f, 1.00f, 1.00f, 1.00f);
    colors[ImGuiCol_NavCursor] = ImVec4(0.26f, 0.59f, 0.98f, 1.00f);
    colors[ImGuiCol_NavWindowingHighlight] = ImVec4(1.00f, 1.00f, 1.00f, 0.70f);
    colors[ImGuiCol_NavWindowingDimBg] = ImVec4(0.80f, 0.80f, 0.80f, 0.20f);
    colors[ImGuiCol_ModalWindowDimBg] = ImVec4(0.80f, 0.80f, 0.80f, 0.35f);
}
// One successfully-loaded size of the default font.
struct ImGui_Font_Size_Pair {
    f32 font_size; // size the font was baked at, in unscaled pixels
    ImFont* data;  // ImGui handle for the font at that size
};
// The application's default font, loaded at several fixed sizes (see
// imgui_font_sizes below).
struct ImGui_Font_Info {
    string font_name;
    Array<ImGui_Font_Size_Pair> sizes; // one entry per size that loaded OK — may be fewer than requested
    s64 current_size = 1;              // index into `sizes`, NOT a pixel size
};
// We can make a more complicated structure here with a Map<string, Font_Info>
// but we only have one font at this point, so it's redundant
global ImGuiContext* imgui_context;
global f32 imgui_font_sizes[6] = {18, 24, 30, 36, 42, 48}; // sizes (px) we attempt to load at startup
global ImGui_Font_Info imgui_default_font;
// Whether at least one size of the default font loaded successfully.
bool ImGui_Default_Font_Loaded () {
    // #TODO #font_crash Should only check once at the top of the frame!
    auto loaded_size_count = imgui_default_font.sizes.count;
    return loaded_size_count > 0;
}
// Returns the (unscaled) pixel size of the currently-selected default font,
// or 0 when no font has loaded. The index is clamped because `current_size`
// defaults to 1 and the loader only adds sizes that load successfully, so
// `sizes` may hold fewer entries than the index assumes (previously an
// out-of-bounds read).
f32 ImGui_Default_Font_Current_Size () {
    if (imgui_default_font.sizes.count <= 0) return 0.0f;
    s64 index = clamp<s64>(imgui_default_font.current_size, 0, imgui_default_font.sizes.count - 1);
    return imgui_default_font.sizes[index].font_size;
}
void ImGui_Push_Default_Font () {
if (ImGui_Default_Font_Loaded()) {
ImFont* font_data = imgui_default_font.sizes[imgui_default_font.current_size].data;
f32 unscaled_font_size = imgui_default_font.sizes[imgui_default_font.current_size].font_size;
ImGui::PushFont(font_data, unscaled_font_size);
}
}
// Pops the font pushed by ImGui_Push_Default_Font(); no-op when no font is
// loaded so the two calls always stay balanced.
void ImGui_Pop_Default_Font () {
    // #font_crash: This will break if we load font mid-frame! don't do this.
    // We should only check this once at the top of the frame!
    if (!ImGui_Default_Font_Loaded()) return;
    ImGui::PopFont();
}
// Scope-guard macro: pushes the default font at size index `x` for the rest
// of the current scope (via the ImGui_Font_Size RAII struct below).
#define imgui_push_default_font_index(x) \
    ImGui_Font_Size Concat(_auto_push_font_, __LINE__)(x)
// Smallest configured size (index 0 of imgui_font_sizes).
#define imgui_push_min_font_size() \
    imgui_push_default_font_index(0)
// Largest configured size (index 5 of imgui_font_sizes).
#define imgui_push_max_font_size() \
    imgui_push_default_font_index(5)
// RAII guard behind imgui_push_default_font_index(): pushes the default font
// at the given size index on construction and pops it on destruction.
// Hardened against two previous issues: (1) the constructor indexed `sizes`
// even when no font had loaded, and clamped only to [0,5] although fewer
// sizes may have loaded; (2) the destructor popped unconditionally, which
// would unbalance ImGui's font stack if the push had been skipped.
struct ImGui_Font_Size {
    bool pushed = false; // only pop what we actually pushed
    ImGui_Font_Size (s64 size_index) {
        if (imgui_default_font.sizes.count <= 0) return; // no font loaded yet
        s64 size_target = clamp<s64>(size_index, 0, imgui_default_font.sizes.count - 1);
        ImFont* font_data = imgui_default_font.sizes[size_target].data;
        f32 unscaled_font_size = imgui_default_font.sizes[size_target].font_size;
        ImGui::PushFont(font_data, unscaled_font_size);
        pushed = true;
    }
    ~ImGui_Font_Size () {
        if (pushed) ImGui::PopFont();
    }
};
// Debug window for inspecting and changing the default font size index.
void ImGui_Show_Font_Info () {
    ImGui::Begin("Font Settings"); // imgui_push_default_font_index(0);
    if (!ImGui_Default_Font_Loaded()){
        ImGui::Text("Default font not present!");
    } else {
        // current_size is s64, so it must be printed with %lld (matching the
        // rest of the codebase); "%d" here was undefined behavior in varargs.
        ImGui::Text("Current font size: %.1f (index: %lld)", ImGui_Default_Font_Current_Size(),
                    imgui_default_font.current_size);
        ImGui::SameLine();
        if (ImGui::Button("Decrease Font Size")) { imgui_default_font.current_size = clamp<s64>(imgui_default_font.current_size - 1, 0, 5); }
        ImGui::SameLine();
        if (ImGui::Button("Increase Font Size")) { imgui_default_font.current_size = clamp<s64>(imgui_default_font.current_size + 1, 0, 5); }
    }
    ImGui::End();
}
// Scope-guard macros: open an ImGui window for the rest of the current scope
// (Begin in the guard's ctor, End in its dtor — see Push_ImGui_Window below).
#define push_imgui_window(title) \
    Push_ImGui_Window Concat(_push_imgui_window_guard, __LINE__)(title)
#define push_imgui_window_with_flags(title, flags) \
    Push_ImGui_Window Concat(_push_imgui_window_guard, __LINE__)(title, flags)
// RAII guard for an ImGui window: Begin() in the constructor, End() in the
// destructor. ImGui::End() must always be called even when Begin() returns
// false (window collapsed/clipped), so the return value is deliberately
// ignored. Assumes name.data is zero-terminated — TODO confirm `string`
// guarantees this.
struct Push_ImGui_Window {
    Push_ImGui_Window (string name, ImGuiWindowFlags flags = 0) {
        ImGui::Begin((char*)name.data, nullptr, flags);
    }
    ~Push_ImGui_Window () {
        ImGui::End();
    }
};
// Handlers for conversion to ImVec4
// Bit-copies a project Vec4 into an ImVec4. Assumes both types are exactly
// four contiguous f32s with identical layout — TODO confirm Vec4's layout
// matches ImVec4 (a static_assert on sizeof would lock this in).
force_inline ImVec4 to_ImVec4 (Vec4 v) {
    ImVec4 v_out;
    memcpy(&v_out, &v, sizeof(ImVec4));
    return v_out;
}
// Draws `s` one character at a time so each character can be colored either
// text_color or highlight_color (highlights[i] selects the color for byte i).
// Layout space is reserved up-front with a Dummy() of the whole string's size,
// then glyphs are drawn manually over it.
// NOTE(review): characters are emitted byte-by-byte, so multi-byte UTF-8
// sequences would be split across AddText calls — presumably inputs are
// ASCII; confirm against callers.
void imgui_show_text_with_highlights (string s, ArrayView<bool> highlights, Vec4 text_color, Vec4 highlight_color) {
    using namespace ImGui;
    Assert(s.count == highlights.count); // one highlight flag per byte
    ImDrawList* draw_list = GetWindowDrawList();
    ImVec2 pos = GetCursorScreenPos();
    ImFont* font = GetFont();
    f32 font_size = GetFontSize();
    // Reserve the full text's rectangle in the layout before manual drawing.
    ImVec2 text_size = CalcTextSize((char*)to_c_string(s));
    Dummy(text_size);
    for_each(i, s) {
        ImU32 color = GetColorU32(to_ImVec4(text_color));
        if (highlights[i]) {
            color = GetColorU32(to_ImVec4(highlight_color));
        }
        u8* next_char = &s[i];
        draw_list->AddText(font, font_size, pos, color, (char*)next_char, (char*)next_char+1);
        // Advance the pen by this character's measured width.
        pos.x += CalcTextSize((char*)next_char, (char*)next_char+1).x;
    }
}
// Measures `s` with the current ImGui font; length-bounded, so s need not be
// zero-terminated.
Vec2 imgui_text_size (string s) {
    char* text_begin = (char*)s.data;
    char* text_end   = text_begin + s.count;
    return to_vec2(ImGui::CalcTextSize(text_begin, text_end));
}
// #debug#ui
// Scope-guard macro: open an ImGui table for the rest of the current scope
// (BeginTable in the guard's ctor, EndTable in its dtor — see below).
#define push_imgui_table(title, columns, flags, table_size) \
    Push_ImGui_Table Concat(_push_imgui_table_guard, __LINE__)(title, columns, flags, table_size)
// RAII guard for an ImGui table. Unlike window Begin/End, ImGui::EndTable()
// must only be called when BeginTable() returned true — hence table_result.
// Assumes name.data is zero-terminated — TODO confirm `string` guarantees this.
struct Push_ImGui_Table {
    bool table_result = false; // did BeginTable succeed (table visible)?
    Push_ImGui_Table (string name, s32 column_count, ImGuiTableFlags flags, ImVec2 table_size) {
        table_result = ImGui::BeginTable((char*)name.data, column_count, flags, table_size);
    }
    ~Push_ImGui_Table () {
        if (table_result) ImGui::EndTable();
    }
};
// Draws zero-terminated `text` right-aligned within the current table column
// by advancing the cursor so the text ends at the column's right edge.
void TableRightAlignedText(u8* text) {
    float text_width   = ImGui::CalcTextSize((char*)text).x;
    float column_width = ImGui::GetColumnWidth();
    float cell_padding = ImGui::GetStyle().CellPadding.x;
    float offset = column_width - text_width - cell_padding * 2.0f;
    ImGui::SetCursorPosX(ImGui::GetCursorPosX() + offset);
    ImGui::TextUnformatted((char*)text);
}
// Debug table listing every live allocation made through the general/default
// allocator, with an optional filter hiding allocations under 1kB.
// Compiles to nothing outside BUILD_DEBUG.
void imgui_show_default_allocator_allocations_table () { using namespace ImGui;
#if BUILD_DEBUG
    ImGuiTableFlags flags = ImGuiTableFlags_Borders | ImGuiTableFlags_RowBg | ImGuiTableFlags_Reorderable | ImGuiTableFlags_Resizable;
    // ImVec2 cursor = GetCursorPos();
    // f32 remaining_window_height = GetWindowHeight() - cursor.y - 16.f;
    ImVec2 table_size = {-1, 0};
    // Snapshot the allocation list under the lock, then render from the copy
    // so we don't hold the allocator mutex while building UI.
    lock(&allocator_mutex);
    auto allocations = array_copy(to_view(get_general_allocator_data()->allocations));
    unlock(&allocator_mutex);
    // Replace this with two sliders min and max to filter allocations?
    Checkbox("Show small allocations (<1kB)", &default_allocator_show_small_allocations);
    Text("%s in %lld allocations",
        format_bytes(get_general_allocator_data()->total_bytes_allocated).data,
        allocations.count);
    push_imgui_table("Allocations##DefaultAllocator", 4, flags, table_size);
    TableSetupColumn("#");
    TableSetupColumn("Label");
    TableSetupColumn("Allocation Size");
    TableSetupColumn("Alignment");
    TableHeadersRow();
    for_each(a, allocations) {
        // Optional filter: skip sub-1kB allocations.
        if (!default_allocator_show_small_allocations && allocations[a].size < 1024) {
            continue;
        }
        TableNextRow();
        TableSetColumnIndex(0); // index
        Text("%lld", a);
        TableSetColumnIndex(1); // label
        Text("%s", allocations[a].label.data);
        TableSetColumnIndex(2); // size
        TableRightAlignedText(format_bytes(allocations[a].size).data);
        TableSetColumnIndex(3); // alignment
        Text("%02d", allocations[a].alignment);
    }
#endif
}
// Filter toggles for imgui_show_arena_allocations_table (persist across frames).
global bool Debug_Show_Thread_Context_Arenas = false; // include per-thread context arenas in the table
global bool Debug_Hide_Empty_Arenas = false;          // hide arenas with zero bytes in use
global bool Debug_Hide_Arena_Arrays = false;          // hide arenas backing ArenaArrays
// Debug table listing every live Arena, with filters for thread-context
// arenas, empty arenas, and ArenaArray-backed arenas (see the Debug_* toggles
// above). Compiles to nothing outside BUILD_DEBUG.
// Fix: the filter loop previously could decrement the loop index twice for a
// single element (when it matched both the ArenaArray filter and the empty
// filter): the second remove-by-value was a no-op but `af -= 1` still ran,
// which for the first element drove the index negative and read out of
// bounds on the next iteration. Each removal now `continue`s immediately.
void imgui_show_arena_allocations_table () { using namespace ImGui;
#if BUILD_DEBUG
    lock_guard(&arenas_in_use_mutex); // arenas can become invalid at any time, so we have to lock the entire operation
    Assert(arenas_in_use.allocated > 0);
    Array<Arena*> arenas_filtered = array_copy(arenas_in_use);
    Checkbox("Show thread context arenas", &Debug_Show_Thread_Context_Arenas);
    SameLine();
    Checkbox("Hide Empty Arenas", &Debug_Hide_Empty_Arenas);
    SameLine();
    Checkbox("Hide ArenaArrays", &Debug_Hide_Arena_Arrays);
    // Collect this thread's context arenas so they can be filtered out.
    Array<Arena*> arenas_to_remove = {};
    if (!Debug_Show_Thread_Context_Arenas) { //Timed_Block_Print("context_arenas");
        context_arenas(thread_context(), &arenas_to_remove, true);
    }
    for_each(a, arenas_to_remove) {
        auto r_arena = arenas_to_remove[a];
        array_unordered_remove_by_value(arenas_filtered, r_arena, 1);
    }
    // Filter pass. array_unordered_remove_by_value uses copy-back, so after a
    // removal we re-check the same index; `continue` ensures a single element
    // can never decrement `af` twice.
    for_each(af, arenas_filtered) {
        auto arena = arenas_filtered[af];
        s64 arena_usage = arena_usage_bytes(arena);
        bool is_arena_array = begins_with(arena->label, "ArenaArray");
        if (is_arena_array) {
            // ArenaArrays report committed bytes as usage.
            arena_usage = arena_usage_committed_bytes(arena);
        }
        if (Debug_Hide_Arena_Arrays && is_arena_array) {
            array_unordered_remove_by_value(arenas_filtered, arena, 1);
            af -= 1; // copy-back removal: recheck this index
            continue;
        }
        // NOTE(review): the last operand is a byte count used as a boolean —
        // it looks like it was meant to be compared against the committed
        // size. Behavior kept as-is; TODO confirm intent.
        if (Debug_Hide_Empty_Arenas &&
            arena_usage == 0 && arena->initial_commit_page_count * PLATFORM_MEMORY_PAGE_SIZE) {
            array_unordered_remove_by_value(arenas_filtered, arena, 1);
            af -= 1; // copy-back removal: recheck this index
        }
    }
    Text("Committed %s in %lld arenas (%lld total)",
        format_bytes(committed_bytes(arenas_filtered)).data,
        arenas_filtered.count, arenas_in_use.count);
    // We should make these clickable or hoverable to get detailed arena info
    // or have a separate window for detailed arena info.
    ImGuiTableFlags flags = ImGuiTableFlags_Borders | ImGuiTableFlags_RowBg | ImGuiTableFlags_Reorderable | ImGuiTableFlags_Resizable;
    ImVec2 table_size = {-1, 0};
    push_imgui_table("Allocations##Arenas", 7, flags, table_size);
    TableSetupColumn("#");
    TableSetupColumn("In use");
    TableSetupColumn("Committed");
    TableSetupColumn("Label");
    TableSetupColumn("Flags");
    TableSetupColumn("High Water Mark");
    TableSetupColumn("Reserve Limit");
    TableHeadersRow();
    for_each(a, arenas_filtered) {
        auto arena = arenas_filtered[a];
        s64 arena_usage = arena_usage_bytes(arena);
        s64 arena_committed = arena_usage_committed_bytes(arena);
        bool is_arena_array = begins_with(arena->label, "ArenaArray");
        if (is_arena_array) {
            arena_usage = arena_usage_committed_bytes(arena);
        }
        // if (arena_is_chained(arena)) {
        // #TODO: This API is broken for some reason. Fix it!
        //     arena_usage = arena_usage_bytes((ExpandableArena*)arena);
        // } else {
        //     arena_usage = arena_usage_bytes(arena);
        // }
        // #TODO: Flags (so we can see if it's expandable or not).
        TableNextRow();
        TableSetColumnIndex(0); // index
        Text("%lld", a);
        TableSetColumnIndex(1); // bytes in use
        TableRightAlignedText(format_bytes(arena_usage).data);
        TableSetColumnIndex(2); // bytes committed
        TableRightAlignedText(format_bytes(arena_committed).data);
        TableSetColumnIndex(3); // label
        Text("%s", arena->label.data);
        TableSetColumnIndex(4); // Flags
        Text("0x%X", arena->flags);
        TableSetColumnIndex(5); // High water mark
#if BUILD_DEBUG
        TableRightAlignedText(format_bytes(arena->high_water_mark).data);
#else
        Text("not available in release mode");
#endif
        TableSetColumnIndex(6); // reserve size limit
        TableRightAlignedText(format_bytes(reserve_size(arena)).data);
        // #TODO: if (Debug_Show_Extra_Arena_Details) ...
        // Text("[%02d] %s - %s\n @ %s:%d (%s)", a, format_bytes(arena_usage_bytes(arena)).data, arena->label.data,
        //     arena->file_path.data, arena->line_number, arena->function_name.data);
    }
#endif
}
// Master visibility toggle for the debug panel (also bound to its close button).
global bool Debug_Show_Debug_Panel = true;
// Main debug window: CPU core counts, allocator/arena tables (debug builds),
// and the current thread's error list with per-error and clear-all buttons.
void ImGui_Debug_Panel () { using namespace ImGui;
    if (!Debug_Show_Debug_Panel) return;
    // ImGui_Show_Font_Info();
    // All UI strings built here are transient, so allocate from temp storage.
    push_allocator(temp());
    Begin("Debug Panel", &Debug_Show_Debug_Panel);
    // #cpuid
    Text("[cpus] physical: %d, logical: %d, primary: %d, secondary: %d", os_cpu_physical_core_count(), os_cpu_logical_core_count(), os_cpu_primary_core_count(), os_cpu_secondary_core_count());
#if BUILD_DEBUG
    SeparatorText("Default Allocator Allocations");
    imgui_show_default_allocator_allocations_table();
    SeparatorText("Arenas in Use");
    imgui_show_arena_allocations_table();
#endif // BUILD_DEBUG
    SeparatorText("Child Threads");
    // #TODO: Make this a node tree
    SeparatorText("Errors");
    ArrayView<Error*> errors = get_all_errors(thread_context());
    if (errors.count && Button("Clear all errors")) {
        clear_errors(thread_context());
        errors.count = 0; // view only — the underlying list was cleared above
    }
    for_each(e, errors) {
        // NOTE(review): "%d" assumes the for_each index `e` is int-sized —
        // confirm for_each's index type (s64 is printed with %lld elsewhere).
        auto button_label = format_cstring("Clear##%d", e);
        if (Button(button_label)) {
            clear_error(thread_context(), errors[e]);
            continue; // entry gone; skip drawing its text this frame
        }
        SameLine();
        Text(" [%d] %s", e, to_string(errors[e]).data);
    }
    Spacing();
    // if (Button("Push some error")) {
    //     log_warning("This is a warning.");
    //     log_error("... and this is an error.");
    // }
    End();
}

178
src/app_main.cpp Normal file
View File

@ -0,0 +1,178 @@
// Application entry point (Win32 + D3D11 + Dear ImGui): creates the main
// window and tray icon, initializes D3D11 and an ImGui context with docking
// and multi-viewport enabled, bakes DPI scaling, loads the default font at
// each size in imgui_font_sizes, then runs the message/render loop until
// WM_QUIT. All resources are torn down before returning.
void ImGui_Win32_Application_Entry_Point () {
    ImGui_ImplWin32_EnableDpiAwareness();
    f32 main_scale = ImGui_ImplWin32_GetDpiScaleForMonitor(::MonitorFromPoint(POINT{ 0, 0 }, MONITOR_DEFAULTTOPRIMARY));
    bool success = false;
    success = os_create_window("Main Window", nullptr, true, true, false, ImGui_WndProc);
    if (!success) {
        Assert(false);
        ExitProcess(1);
    }
    // Tray icon context menu: single "Exit" entry (handled in WM_TRAYICON).
    get_main_window_pointer()->tray_icon_menu = CreatePopupMenu();
    AppendMenuW(get_main_window_pointer()->tray_icon_menu, MF_STRING, ICON_CONTEXT_MENU_ITEM_ID, L"Exit");
    HWND hwnd = get_main_window().window;
    success = Win32_Load_Main_Window_Icon("tmp.ico");
    if (!success) { log_warning("Failed to load tmp.ico"); }
    success = Win32_Load_Main_Window_Icon_Minimized("tmp_min.ico");
    if (!success) { log_warning("Failed to load tmp_min.ico"); }
    // :Win32_Initialize_Extended
    Win32_Set_Main_Icon();
    Win32_Set_Tray_Icon("Musa's File Explorer");
    Win32_Hide_Tray_Icon();
    if (!CreateDeviceD3D(hwnd)) {
        CleanupDeviceD3D();
        return;
    }
    // Set dark titlebar
    // NOTE(review): DWMWA_USE_IMMERSIVE_DARK_MODE (attribute 20) is documented
    // to take a 4-byte BOOL; we pass an 8-byte s64 here. It appears to work in
    // practice, but confirm against the DwmSetWindowAttribute docs.
    s64 dark_mode = 1;
    DWORD USE_IMMERSIVE_DARK_MODE = 20;
    ::DwmSetWindowAttribute(hwnd, USE_IMMERSIVE_DARK_MODE, &dark_mode, sizeof(s64));
    // A hacky workaround to display the dark title bar properly. Nothing else we've tried worked
    ::ShowWindow(hwnd, SW_HIDE);
    ::ShowWindow(hwnd, SW_SHOW);
    // Setup Dear ImGui context
    IMGUI_CHECKVERSION();
    log("ImGui Version %s \n", ImGui::GetVersion());
    imgui_context = ImGui::CreateContext();
    ImGuiIO& io = ImGui::GetIO(); (void)io;
    // io.ConfigFlags |= ImGuiConfigFlags_NavEnableKeyboard; // Enable Keyboard Controls
    // io.ConfigFlags |= ImGuiConfigFlags_NavEnableGamepad; // Enable Gamepad Controls
    io.ConfigFlags |= ImGuiConfigFlags_DockingEnable; // Enable Docking
    io.ConfigFlags |= ImGuiConfigFlags_ViewportsEnable; // Enable Multi-Viewport / Platform Windows
    // io.ConfigViewportsNoAutoMerge = true; // io.ConfigViewportsNoTaskBarIcon = true; // io.ConfigDockingAlwaysTabBar = true;
    // io.ConfigDockingTransparentPayload = true;
    // Setup Dear ImGui style
    // ImGui::StyleColorsDark();
    Set_Custom_Style();
    // Setup scaling
    ImGuiStyle& style = ImGui::GetStyle();
    style.ScaleAllSizes(main_scale); // Bake a fixed style scale. (until we have a solution for dynamic style scaling, changing this requires resetting Style + calling this again)
    style.FontScaleDpi = main_scale; // Set initial font scale. (using io.ConfigDpiScaleFonts=true makes this unnecessary. We leave both here for documentation purpose)
    io.ConfigDpiScaleFonts = true; // [Experimental] Automatically overwrite style.FontScaleDpi in Begin() when Monitor DPI changes. This will scale fonts but _NOT_ scale sizes/padding for now.
    io.ConfigDpiScaleViewports = true; // [Experimental] Scale Dear ImGui and Platform Windows when Monitor DPI changes.
    // When viewports are enabled we tweak WindowRounding/WindowBg so platform windows can look identical to regular ones.
    if (io.ConfigFlags & ImGuiConfigFlags_ViewportsEnable) {
        style.WindowRounding = 0.0f;
        style.Colors[ImGuiCol_WindowBg].w = 1.0f;
    }
    // Setup Platform/Renderer backends
    ImGui_ImplWin32_Init(hwnd);
    ImGui_ImplDX11_Init(g_pd3dDevice, g_pd3dDeviceContext);
    style.FontSizeBase = 24.0f;
    //io.Fonts->AddFontDefault();
    // Load the default font at each configured size; only sizes that load
    // successfully are recorded (see ImGui_Font_Info).
    string font_file_name = "RobotoMono-Regular.ttf";
    { push_allocator_label("ImGui Default Font");
        imgui_default_font.sizes.allocator = default_allocator();
        imgui_default_font.font_name = font_file_name;
        for (s64 i = 0; i < ArrayCount(imgui_font_sizes); i += 1) {
            f32 font_size = imgui_font_sizes[i];
            ImFont* new_font = io.Fonts->AddFontFromFileTTF((char*)font_file_name.data, font_size);
            if (new_font) {
                ImGui_Font_Size_Pair pair = {font_size, new_font};
                array_add(imgui_default_font.sizes, pair);
            }
        }
    }
    bool show_demo_window = true;
    bool show_another_window = false;
    ImVec4 clear_color = ImVec4(0.45f, 0.55f, 0.60f, 1.00f);
    // Main loop
    bool done = false;
    // Cache HDC before initializing Icons but after setting up D3D11:
    get_main_window_pointer()->hdc = GetDC(hwnd);
    push_allocator(temp()); // default allocator for temporary UI stuff.
    while (!done) {
        // Poll and handle messages (inputs, window resize, etc.)
        // See the WndProc() function for how events are dispatched to the Win32 backend.
        MSG msg;
        while (::PeekMessage(&msg, nullptr, 0U, 0U, PM_REMOVE)) {
            ::TranslateMessage(&msg);
            ::DispatchMessage(&msg);
            if (msg.message == WM_QUIT) {
                done = true;
            }
        }
        // Handle window being minimized or screen locked
        if ((g_SwapChainOccluded && g_pSwapChain->Present(0, DXGI_PRESENT_TEST) == DXGI_STATUS_OCCLUDED) || ::IsIconic(hwnd)) {
            ::Sleep(10);
            continue;
        }
        g_SwapChainOccluded = false;
        // Handle window resize (we don't resize directly in the WM_SIZE handler)
        if (g_ResizeWidth != 0 && g_ResizeHeight != 0) {
            CleanupRenderTarget();
            g_pSwapChain->ResizeBuffers(0, g_ResizeWidth, g_ResizeHeight, DXGI_FORMAT_UNKNOWN, 0);
            g_ResizeWidth = g_ResizeHeight = 0;
            CreateRenderTarget();
        }
        // Start the Dear ImGui frame
        ImGui_ImplDX11_NewFrame();
        ImGui_ImplWin32_NewFrame();
        ImGui::NewFrame();
        ImGui_Push_Default_Font();
        // Simple dockspace:
        ImGui::DockSpaceOverViewport();
        ImGui_Debug_Panel();
        ImGui_Pop_Default_Font();
        // Rendering
        ImGui::Render();
        const f32 clear_color_with_alpha[4] = { clear_color.x * clear_color.w, clear_color.y * clear_color.w, clear_color.z * clear_color.w, clear_color.w };
        g_pd3dDeviceContext->OMSetRenderTargets(1, &g_mainRenderTargetView, nullptr);
        g_pd3dDeviceContext->ClearRenderTargetView(g_mainRenderTargetView, clear_color_with_alpha);
        ImGui_ImplDX11_RenderDrawData(ImGui::GetDrawData());
        // Update and Render additional Platform Windows
        if (io.ConfigFlags & ImGuiConfigFlags_ViewportsEnable) {
            ImGui::UpdatePlatformWindows();
            ImGui::RenderPlatformWindowsDefault();
        }
        // Present
        HRESULT hr = g_pSwapChain->Present(1, 0); // Present with vsync
        // HRESULT hr = g_pSwapChain->Present(0, 0); // Present without vsync
        g_SwapChainOccluded = (hr == DXGI_STATUS_OCCLUDED);
        temp_reset_keeping_memory(); // reset_temporary_storage
    } // while (!done)
    // Cleanup
    ImGui_ImplDX11_Shutdown();
    ImGui_ImplWin32_Shutdown();
    ImGui::DestroyContext();
    CleanupDeviceD3D();
    ::DestroyWindow(hwnd);
    Win32_Hide_Tray_Icon();
}