Initial commit
This commit is contained in:
commit
e0a515017d
364
.gitignore
vendored
Normal file
364
.gitignore
vendored
Normal file
@ -0,0 +1,364 @@
|
||||
## Ignore Visual Studio temporary files, build results, and
|
||||
## files generated by popular Visual Studio add-ons.
|
||||
##
|
||||
## Get latest from https://github.com/github/gitignore/blob/master/VisualStudio.gitignore
|
||||
|
||||
# User-specific files
|
||||
*.suo
|
||||
*.user
|
||||
*.userosscache
|
||||
*.sln.docstates
|
||||
|
||||
# User-specific files (MonoDevelop/Xamarin Studio)
|
||||
*.userprefs
|
||||
|
||||
# Build results
|
||||
[Dd]ebug/
|
||||
[Dd]ebugPublic/
|
||||
[Rr]elease/
|
||||
[Rr]eleases/
|
||||
x64/
|
||||
x86/
|
||||
bld/
|
||||
[Bb]in/
|
||||
[Oo]bj/
|
||||
[Ll]og/
|
||||
|
||||
# Visual Studio 2015/2017 cache/options directory
|
||||
.vs/
|
||||
# Uncomment if you have tasks that create the project's static files in wwwroot
|
||||
#wwwroot/
|
||||
|
||||
# Visual Studio 2017 auto generated files
|
||||
Generated\ Files/
|
||||
|
||||
# MSTest test Results
|
||||
[Tt]est[Rr]esult*/
|
||||
[Bb]uild[Ll]og.*
|
||||
|
||||
# NUNIT
|
||||
*.VisualState.xml
|
||||
TestResult.xml
|
||||
|
||||
# Build Results of an ATL Project
|
||||
[Dd]ebugPS/
|
||||
[Rr]eleasePS/
|
||||
dlldata.c
|
||||
|
||||
# Benchmark Results
|
||||
BenchmarkDotNet.Artifacts/
|
||||
|
||||
# .NET Core
|
||||
project.lock.json
|
||||
project.fragment.lock.json
|
||||
artifacts/
|
||||
|
||||
# StyleCop
|
||||
StyleCopReport.xml
|
||||
|
||||
# Files built by Visual Studio
|
||||
*_i.c
|
||||
*_p.c
|
||||
*_i.h
|
||||
*.ilk
|
||||
*.meta
|
||||
*.obj
|
||||
*.iobj
|
||||
*.pch
|
||||
*.pdb
|
||||
*.ipdb
|
||||
*.pgc
|
||||
*.pgd
|
||||
*.rsp
|
||||
*.sbr
|
||||
*.tlb
|
||||
*.tli
|
||||
*.tlh
|
||||
*.tmp
|
||||
*.tmp_proj
|
||||
*.log
|
||||
*.vspscc
|
||||
*.vssscc
|
||||
.builds
|
||||
*.pidb
|
||||
*.svclog
|
||||
*.scc
|
||||
|
||||
# Chutzpah Test files
|
||||
_Chutzpah*
|
||||
|
||||
# Visual C++ cache files
|
||||
ipch/
|
||||
*.aps
|
||||
*.ncb
|
||||
*.opendb
|
||||
*.opensdf
|
||||
*.sdf
|
||||
*.cachefile
|
||||
*.VC.db
|
||||
*.VC.VC.opendb
|
||||
|
||||
# Visual Studio profiler
|
||||
*.psess
|
||||
*.vsp
|
||||
*.vspx
|
||||
*.sap
|
||||
|
||||
# Visual Studio Trace Files
|
||||
*.e2e
|
||||
|
||||
# TFS 2012 Local Workspace
|
||||
$tf/
|
||||
|
||||
# Guidance Automation Toolkit
|
||||
*.gpState
|
||||
|
||||
# ReSharper is a .NET coding add-in
|
||||
_ReSharper*/
|
||||
*.[Rr]e[Ss]harper
|
||||
*.DotSettings.user
|
||||
|
||||
# JustCode is a .NET coding add-in
|
||||
.JustCode
|
||||
|
||||
# TeamCity is a build add-in
|
||||
_TeamCity*
|
||||
|
||||
# DotCover is a Code Coverage Tool
|
||||
*.dotCover
|
||||
|
||||
# AxoCover is a Code Coverage Tool
|
||||
.axoCover/*
|
||||
!.axoCover/settings.json
|
||||
|
||||
# Visual Studio code coverage results
|
||||
*.coverage
|
||||
*.coveragexml
|
||||
|
||||
# NCrunch
|
||||
_NCrunch_*
|
||||
.*crunch*.local.xml
|
||||
nCrunchTemp_*
|
||||
|
||||
# MightyMoose
|
||||
*.mm.*
|
||||
AutoTest.Net/
|
||||
|
||||
# Web workbench (sass)
|
||||
.sass-cache/
|
||||
|
||||
# Installshield output folder
|
||||
[Ee]xpress/
|
||||
|
||||
# DocProject is a documentation generator add-in
|
||||
DocProject/buildhelp/
|
||||
DocProject/Help/*.HxT
|
||||
DocProject/Help/*.HxC
|
||||
DocProject/Help/*.hhc
|
||||
DocProject/Help/*.hhk
|
||||
DocProject/Help/*.hhp
|
||||
DocProject/Help/Html2
|
||||
DocProject/Help/html
|
||||
|
||||
# Click-Once directory
|
||||
publish/
|
||||
|
||||
# Publish Web Output
|
||||
*.[Pp]ublish.xml
|
||||
*.azurePubxml
|
||||
# Note: Comment the next line if you want to checkin your web deploy settings,
|
||||
# but database connection strings (with potential passwords) will be unencrypted
|
||||
*.pubxml
|
||||
*.publishproj
|
||||
|
||||
# Microsoft Azure Web App publish settings. Comment the next line if you want to
|
||||
# checkin your Azure Web App publish settings, but sensitive information contained
|
||||
# in these scripts will be unencrypted
|
||||
PublishScripts/
|
||||
|
||||
# NuGet Packages
|
||||
*.nupkg
|
||||
# The packages folder can be ignored because of Package Restore
|
||||
**/[Pp]ackages/*
|
||||
# except build/, which is used as an MSBuild target.
|
||||
!**/[Pp]ackages/build/
|
||||
# Uncomment if necessary however generally it will be regenerated when needed
|
||||
#!**/[Pp]ackages/repositories.config
|
||||
# NuGet v3's project.json files produces more ignorable files
|
||||
*.nuget.props
|
||||
*.nuget.targets
|
||||
|
||||
# Microsoft Azure Build Output
|
||||
csx/
|
||||
*.build.csdef
|
||||
|
||||
# Microsoft Azure Emulator
|
||||
ecf/
|
||||
rcf/
|
||||
|
||||
# Windows Store app package directories and files
|
||||
AppPackages/
|
||||
BundleArtifacts/
|
||||
Package.StoreAssociation.xml
|
||||
_pkginfo.txt
|
||||
*.appx
|
||||
|
||||
# Visual Studio cache files
|
||||
# files ending in .cache can be ignored
|
||||
*.[Cc]ache
|
||||
# but keep track of directories ending in .cache
|
||||
!*.[Cc]ache/
|
||||
|
||||
# Others
|
||||
ClientBin/
|
||||
~$*
|
||||
*~
|
||||
*.dbmdl
|
||||
*.dbproj.schemaview
|
||||
*.jfm
|
||||
*.pfx
|
||||
*.publishsettings
|
||||
orleans.codegen.cs
|
||||
|
||||
# Including strong name files can present a security risk
|
||||
# (https://github.com/github/gitignore/pull/2483#issue-259490424)
|
||||
#*.snk
|
||||
|
||||
# Since there are multiple workflows, uncomment next line to ignore bower_components
|
||||
# (https://github.com/github/gitignore/pull/1529#issuecomment-104372622)
|
||||
#bower_components/
|
||||
|
||||
# RIA/Silverlight projects
|
||||
Generated_Code/
|
||||
|
||||
# Backup & report files from converting an old project file
|
||||
# to a newer Visual Studio version. Backup files are not needed,
|
||||
# because we have git ;-)
|
||||
_UpgradeReport_Files/
|
||||
Backup*/
|
||||
UpgradeLog*.XML
|
||||
UpgradeLog*.htm
|
||||
ServiceFabricBackup/
|
||||
*.rptproj.bak
|
||||
|
||||
# SQL Server files
|
||||
*.mdf
|
||||
*.ldf
|
||||
*.ndf
|
||||
|
||||
# Business Intelligence projects
|
||||
*.rdl.data
|
||||
*.bim.layout
|
||||
*.bim_*.settings
|
||||
*.rptproj.rsuser
|
||||
|
||||
# Microsoft Fakes
|
||||
FakesAssemblies/
|
||||
|
||||
# GhostDoc plugin setting file
|
||||
*.GhostDoc.xml
|
||||
|
||||
# Node.js Tools for Visual Studio
|
||||
.ntvs_analysis.dat
|
||||
node_modules/
|
||||
|
||||
# Visual Studio 6 build log
|
||||
*.plg
|
||||
|
||||
# Visual Studio 6 workspace options file
|
||||
*.opt
|
||||
|
||||
# Visual Studio 6 auto-generated workspace file (contains which files were open etc.)
|
||||
*.vbw
|
||||
|
||||
# Visual Studio LightSwitch build output
|
||||
**/*.HTMLClient/GeneratedArtifacts
|
||||
**/*.DesktopClient/GeneratedArtifacts
|
||||
**/*.DesktopClient/ModelManifest.xml
|
||||
**/*.Server/GeneratedArtifacts
|
||||
**/*.Server/ModelManifest.xml
|
||||
_Pvt_Extensions
|
||||
|
||||
# Paket dependency manager
|
||||
.paket/paket.exe
|
||||
paket-files/
|
||||
|
||||
# FAKE - F# Make
|
||||
.fake/
|
||||
|
||||
# JetBrains Rider
|
||||
.idea/
|
||||
*.sln.iml
|
||||
|
||||
# CodeRush
|
||||
.cr/
|
||||
|
||||
# Python Tools for Visual Studio (PTVS)
|
||||
__pycache__/
|
||||
*.pyc
|
||||
*.egg-info
|
||||
*.egg
|
||||
|
||||
# Cake - Uncomment if you are using it
|
||||
# tools/**
|
||||
# !tools/packages.config
|
||||
|
||||
# Tabs Studio
|
||||
*.tss
|
||||
|
||||
# Telerik's JustMock configuration file
|
||||
*.jmconfig
|
||||
|
||||
# BizTalk build output
|
||||
*.btp.cs
|
||||
*.btm.cs
|
||||
*.odx.cs
|
||||
*.xsd.cs
|
||||
|
||||
# OpenCover UI analysis results
|
||||
OpenCover/
|
||||
|
||||
# Azure Stream Analytics local run output
|
||||
ASALocalRun/
|
||||
|
||||
# MSBuild Binary and Structured Log
|
||||
*.binlog
|
||||
|
||||
# NVidia Nsight GPU debugger configuration file
|
||||
*.nvuser
|
||||
|
||||
# MFractors (Xamarin productivity tool) working folder
|
||||
.mfractor/
|
||||
|
||||
.exe
|
||||
.raw
|
||||
.txt
|
||||
*.csv
|
||||
*.pickle
|
||||
*.onnx
|
||||
|
||||
#Environment file
|
||||
.env
|
||||
.vscode/
|
||||
|
||||
installed*
|
||||
compiled/
|
||||
python/flowcat.egg-info/
|
||||
.Rproj.user
|
||||
|
||||
# Docs build files
|
||||
docs/_build/*
|
||||
docs/Doxyfile*
|
||||
|
||||
# CMake & GNU Make
|
||||
CMakeCache.txt
|
||||
CMakeFiles/
|
||||
Makefile
|
||||
|
||||
# Libraries
|
||||
*.so
|
||||
*.a
|
||||
CMakeSettings.json
|
||||
._.gitignore
|
||||
|
||||
build/
|
||||
142
build.jai
Normal file
142
build.jai
Normal file
@ -0,0 +1,142 @@
|
||||
VERSION :: "0.1a";
|
||||
|
||||
#run,stallable build_cpp_project();
|
||||
|
||||
LIB_BASE_NAME :: "musa-lib";
|
||||
EXE_BASE_NAME :: "musa";
|
||||
|
||||
LIB_SOURCE_FILENAMES :: string.["unity_build_lib.cpp"];
|
||||
EXE_SOURCE_FILENAMES :: string.["unity_build_exe.cpp"];
|
||||
|
||||
INCLUDE_DIRS :: string.[
|
||||
"src",
|
||||
"lib",
|
||||
"lib/api",
|
||||
"third_party"
|
||||
];
|
||||
|
||||
build_cpp_project :: () {
|
||||
start := seconds_since_init();
|
||||
|
||||
set_build_options_dc(.{do_output=false});
|
||||
options := get_build_options();
|
||||
args := options.compile_time_command_line;
|
||||
|
||||
compile_debug := true;
|
||||
if array_find(args, "release") { compile_debug = false; }
|
||||
build_lib := array_find(args, "build_lib");
|
||||
build_exe := array_find(args, "build_exe");
|
||||
|
||||
generate_meta_file(compile_debug);
|
||||
if build_lib build_cpp_lib(compile_debug);
|
||||
if build_exe build_cpp_exe(compile_debug);
|
||||
|
||||
print("\nFull build time: % seconds\n\n", FF(seconds_since_init() - start, 3));
|
||||
}
|
||||
|
||||
build_cpp_exe :: (compile_debug: bool) {
|
||||
extra: [..]string;
|
||||
if os_target == {
|
||||
case .WINDOWS;
|
||||
array_add(*extra, "-D_CRT_SECURE_NO_WARNINGS", "-D_SILENCE_TR1_NAMESPACE_DEPRECATION_WARNING", "-D_CRT_NONSTDC_NO_DEPRECATE");
|
||||
array_add(*extra, "-D_USE_MATH_DEFINES"); // , "-D_WIN32_WINNT=0x0A00", "/utf-8"
|
||||
|
||||
array_add(*extra, "/MT"); // Static - The default
|
||||
|
||||
for INCLUDE_DIRS array_add(*extra, tprint("/I%", it));
|
||||
case; assert(false, "Other OSes not supported yet!");
|
||||
}
|
||||
|
||||
exe_path := tprint("bin/%", EXE_BASE_NAME);
|
||||
make_directory_if_it_does_not_exist("bin");
|
||||
|
||||
success := build_cpp_executable(exe_path, ..EXE_SOURCE_FILENAMES, debug=compile_debug, extra=extra);
|
||||
print("\nbuild_cpp_executable, success: %\n", success);
|
||||
}
|
||||
|
||||
build_cpp_lib :: (compile_debug: bool) -> bool {
|
||||
lib_path := tprint("build/Debug/%", LIB_BASE_NAME);
|
||||
if !compile_debug {
|
||||
lib_path = tprint("build/Release/%", LIB_BASE_NAME);
|
||||
}
|
||||
lib_directory := path_strip_filename(lib_path);
|
||||
make_directory_if_it_does_not_exist(lib_directory, recursive = true);
|
||||
print("Output lib_path: %\n\n", lib_path);
|
||||
|
||||
extra: [..]string;
|
||||
if os_target == {
|
||||
case .WINDOWS;
|
||||
array_add(*extra, "-D_CRT_SECURE_NO_WARNINGS", "-D_SILENCE_TR1_NAMESPACE_DEPRECATION_WARNING", "-D_CRT_NONSTDC_NO_DEPRECATE");
|
||||
array_add(*extra, "-D_USE_MATH_DEFINES"); // , "-D_WIN32_WINNT=0x0A00", "/utf-8"
|
||||
|
||||
// array_add(*extra, "/MD"); // Dynamic
|
||||
array_add(*extra, "/MT"); // Static - The default
|
||||
|
||||
for INCLUDE_DIRS array_add(*extra, tprint("/I%", it));
|
||||
case .LINUX;
|
||||
array_add(*extra, "-fPIC");
|
||||
array_add(*extra, tprint("-I/%/%", #filepath, "JIIM_Library"));
|
||||
for INCLUDE_DIRS array_add(*extra, tprint("-I/%/%", #filepath, it));
|
||||
case; assert(false, "Other OSes not supported yet!");
|
||||
}
|
||||
|
||||
success := build_cpp_static_lib(lib_path, ..LIB_SOURCE_FILENAMES, debug=compile_debug, extra=extra);
|
||||
print("\nbuild_cpp_static_lib, success: %\n", success);
|
||||
return success;
|
||||
}
|
||||
|
||||
META_GENERATED_HEADER_FILE_PATH :: "src/include/meta_generated.h";
|
||||
|
||||
cpu_target: CPU_Tag = .X64;
|
||||
os_target: Operating_System_Tag = .WINDOWS;
|
||||
|
||||
generate_meta_file :: (debug: bool) {
|
||||
sb: String_Builder;
|
||||
append(*sb, "#pragma once\n\n");
|
||||
print_to_builder(*sb, "constexpr const char* MUSA_LIB_VERSION = \"%\";\n", VERSION);
|
||||
print_to_builder(*sb, "#define BUILD_DEBUG %\n", cast(s32)debug);
|
||||
|
||||
print_to_builder(*sb, "#define OS_WINDOWS %\n", ifx os_target == .WINDOWS then 1 else 0);
|
||||
print_to_builder(*sb, "#define OS_LINUX %\n", ifx os_target == .LINUX then 1 else 0);
|
||||
print_to_builder(*sb, "#define OS_MACOS %\n", ifx os_target == .MACOS then 1 else 0);
|
||||
print_to_builder(*sb, "#define OS_ANDROID %\n", ifx os_target == .ANDROID then 1 else 0);
|
||||
print_to_builder(*sb, "#define OS_IOS %\n", ifx os_target == .IOS then 1 else 0);
|
||||
|
||||
print_to_builder(*sb, "#define ARCH_CPU_X64 %\n", ifx cpu_target == .X64 then 1 else 0);
|
||||
print_to_builder(*sb, "#define ARCH_CPU_ARM64 %\n", ifx cpu_target == .ARM64 then 1 else 0);
|
||||
|
||||
os_is_unix := os_target == .MACOS || os_target == .LINUX || os_target == .IOS || os_target == .ANDROID;
|
||||
print_to_builder(*sb, "#define OS_IS_UNIX %\n", ifx os_is_unix then 1 else 0);
|
||||
|
||||
print_to_builder(*sb, "#define COMPILER_MSVC %\n", ifx os_target == .WINDOWS then 1 else 0);
|
||||
print_to_builder(*sb, "#define COMPILER_CLANG %\n", ifx os_target != .WINDOWS then 1 else 0);
|
||||
|
||||
print_to_builder(*sb, "#define ARRAY_ENABLE_BOUNDS_CHECKING %\n", cast(s32)debug);
|
||||
|
||||
append(*sb, "#define COMPILER_GCC 0\n");
|
||||
|
||||
meta_file_data := builder_to_string(*sb);
|
||||
write_entire_file(META_GENERATED_HEADER_FILE_PATH, meta_file_data);
|
||||
|
||||
print("Generated meta header at % for % on %\n", META_GENERATED_HEADER_FILE_PATH, cpu_target, os_target);
|
||||
}
|
||||
|
||||
#import "Basic";
|
||||
#import "BuildCpp";
|
||||
#import "Compiler";
|
||||
#import "File";
|
||||
#import "File_Utilities";
|
||||
#import "String";
|
||||
#import "System";
|
||||
#import "Process";
|
||||
|
||||
#if OS == .WINDOWS {
|
||||
#import "Ico_File";
|
||||
#import "Windows_Resources";
|
||||
#import "Windows_Utf8";
|
||||
}
|
||||
// Note: Other operating systems are not supported for this application (yet).
|
||||
|
||||
FF :: (val: float64, width:=1) -> FormatFloat #expand {
|
||||
return formatFloat(val, trailing_width = width, zero_removal=.NO);
|
||||
} @Utility
|
||||
15
cpp_lib.todo
Normal file
15
cpp_lib.todo
Normal file
@ -0,0 +1,15 @@
|
||||
[#TODO]
|
||||
|
||||
[*] This is the order I want to follow, because I need them in thread context.
|
||||
- Allocator interface
|
||||
- Thread context (Base_Thread_Context.h)
|
||||
- Arenas (copy from Ex1)
|
||||
- Entry point(s) `Entry_Point_Main.cpp`
|
||||
-> We can have a lib_init() instead of a clear entry point for now.
|
||||
-> Switch from library to application once I add an entry point
|
||||
-> See how rjf abstracts his entry points for each platform with TCTX.
|
||||
[ ] 1. setup thread-local storage via thread_static (see raddbg, base_core.h,
|
||||
C_LINKAGE thread_static TCTX *tctx_thread_local;
|
||||
>> Must be assigned at entry point (arena_alloc())
|
||||
>> TCTX defined in base_thread_context.h
|
||||
> See Base_Thread.h
|
||||
29
lib/Base/Allocator.cpp
Normal file
29
lib/Base/Allocator.cpp
Normal file
@ -0,0 +1,29 @@
|
||||
#include "Allocator.h"
|
||||
#include "Base_Thread_Context.h"
|
||||
|
||||
force_inline Allocator get_temp_allocator() {
|
||||
return get_allocator(get_thread_context()->temp);
|
||||
}
|
||||
|
||||
force_inline Allocator get_context_allocator() {
|
||||
Thread_Context* context = get_thread_context();
|
||||
return context->allocator;
|
||||
}
|
||||
|
||||
void* internal_alloc (s64 size) {
|
||||
Allocator allocator = get_context_allocator();
|
||||
void* result = allocator.proc(Allocator_Mode::ALLOCATE, size, 0, nullptr, allocator.data);
|
||||
return result;
|
||||
}
|
||||
|
||||
void* internal_realloc (void* memory, s64 size, s64 old_size) {
|
||||
Allocator allocator = get_context_allocator();
|
||||
void* result = allocator.proc(Allocator_Mode::RESIZE, size, 0, nullptr, allocator.data);
|
||||
return result;
|
||||
}
|
||||
|
||||
void internal_free (void* memory) {
|
||||
Allocator allocator = get_context_allocator();
|
||||
allocator.proc(Allocator_Mode::DEALLOCATE, 0, 0, memory, allocator.data);
|
||||
}
|
||||
|
||||
57
lib/Base/Allocator.h
Normal file
57
lib/Base/Allocator.h
Normal file
@ -0,0 +1,57 @@
|
||||
#pragma once
|
||||
|
||||
#include "Base.h"
|
||||
|
||||
#include "error-codes.h"
|
||||
|
||||
enum class Allocator_Mode: s32 {
|
||||
ALLOCATE = 0,
|
||||
RESIZE = 1,
|
||||
DEALLOCATE = 2,
|
||||
// IS_THIS_YOURS = 3,
|
||||
// DETAILS = 4,
|
||||
};
|
||||
|
||||
typedef void* (*Allocator_Proc)(Allocator_Mode mode, s64 requested_size, s64 old_size, void* old_memory, void* allocator_data);
|
||||
|
||||
struct Allocator {
|
||||
Allocator_Proc proc;
|
||||
void* data;
|
||||
};
|
||||
|
||||
// Public Allocator API:
|
||||
// Thread-local allocators:
|
||||
PROTOTYPING_API Allocator get_temp_allocator();
|
||||
PROTOTYPING_API Allocator get_context_allocator();
|
||||
|
||||
// Note that alignment is handled on a per-allocator basis.
|
||||
void* internal_alloc (s64 size);
|
||||
void internal_free (void* memory);
|
||||
void* internal_realloc (void* memory, s64 size, s64 old_size);
|
||||
|
||||
template <typename T>
|
||||
void Initialize (T* memory) { (*memory) = T(); }
|
||||
|
||||
template <typename T> T* New (Allocator allocator, bool initialize=true) {
|
||||
auto memory = (T*)allocator.proc(Allocator_Mode::ALLOCATE, sizeof(T), 0, nullptr, allocator.data);
|
||||
|
||||
if (initialize) {
|
||||
(*memory) = T();
|
||||
}
|
||||
|
||||
return memory;
|
||||
}
|
||||
|
||||
template <typename T> T* New (bool initialize=true) {
|
||||
auto memory = (T*)internal_alloc(sizeof(T));
|
||||
|
||||
if (initialize) {
|
||||
(*memory) = T();
|
||||
}
|
||||
|
||||
return memory;
|
||||
}
|
||||
|
||||
void Delete (void* object) { internal_free(object); }
|
||||
|
||||
// template <typename T> T* NewArray(s64 count, bool initialize) // should be in Array.jai
|
||||
182
lib/Base/Arena.cpp
Normal file
182
lib/Base/Arena.cpp
Normal file
@ -0,0 +1,182 @@
|
||||
#pragma once
|
||||
|
||||
#include "Arena.h"
|
||||
#include "Arena_Windows.cpp"
|
||||
|
||||
constexpr u16 ARENA_DEFAULT_ALIGNMENT = CPU_REGISTER_WIDTH_BYTES;
|
||||
|
||||
void* arena_allocator_proc (Allocator_Mode mode, s64 requested_size, s64 old_size, void* old_memory, void* allocator_data) {
|
||||
Arena* arena = (Arena*)allocator_data;
|
||||
Assert(arena != nullptr);
|
||||
switch (mode) {
|
||||
case Allocator_Mode::ALLOCATE: {
|
||||
return arena_alloc(arena, requested_size);
|
||||
} break;
|
||||
case Allocator_Mode::RESIZE: {
|
||||
Assert(false); // DO NOT USE RESIZE WITH ARENAS!
|
||||
// Either use Array, which is backed by the general purpose allocator
|
||||
// or use ArenaArray if you need to expand the size.
|
||||
void* result = arena_alloc(arena, requested_size);
|
||||
s64 size_to_copy = old_size < requested_size ? old_size : requested_size;
|
||||
if (result && size_to_copy) {
|
||||
memcpy(result, old_memory, size_to_copy);
|
||||
}
|
||||
return result;
|
||||
} break;
|
||||
case Allocator_Mode::DEALLOCATE:
|
||||
return nullptr; // unused
|
||||
break;
|
||||
}
|
||||
|
||||
return nullptr;
|
||||
}
|
||||
|
||||
// See arena_delete for decommissioning of arenas initialized with arena_init
|
||||
void arena_init (Arena* arena, Arena_Reserve new_reserve, s32 default_commit_page_count) {
|
||||
Assert(arena != nullptr);
|
||||
|
||||
s64 commit_size = default_commit_page_count * PLATFORM_MEMORY_PAGE_SIZE;
|
||||
Assert(commit_size >= 0);
|
||||
|
||||
if (!is_valid(arena)) {
|
||||
arena->reserve_size = new_reserve;
|
||||
|
||||
platform_init(arena, reserve_size(arena));
|
||||
arena->current_point = arena_start(arena);
|
||||
arena->first_uncommitted_page = arena->memory_base;
|
||||
if (arena_commit_first_pages(arena, commit_size)) {
|
||||
arena->initial_commit_page_count = default_commit_page_count;
|
||||
// arena_lock_pages(arena, arena->memory_base, commit_size);
|
||||
}
|
||||
|
||||
arena->alignment = ARENA_DEFAULT_ALIGNMENT;
|
||||
arena->flags = (arena->flags | (~Arena_Flags::Is_Bootstrapped)); // unset is bootstrapped, if present.
|
||||
}
|
||||
}
|
||||
|
||||
bool arena_commit_first_pages (Arena* arena, s64 commit_size, s64 start_offset) {
|
||||
Assert(commit_size > 0); // no point calling this without commit size
|
||||
Assert(start_offset >= 0); // should NOT be negative.
|
||||
|
||||
if (commit_size <= 0 || start_offset < 0) {
|
||||
return false;
|
||||
}
|
||||
// bool success = true;
|
||||
u8* final_address = Align<u8*>(arena->memory_base + commit_size + start_offset,
|
||||
arena->alignment);
|
||||
s64 commit_size_final = (s64)(final_address - arena->memory_base);
|
||||
|
||||
if (commit_size_final > reserve_size(arena)) {
|
||||
Assert(false); // Commit size exceeds reserve size
|
||||
return false;
|
||||
}
|
||||
|
||||
if (final_address > arena->first_uncommitted_page) {
|
||||
extend_committed_pages(arena, final_address);
|
||||
}
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
Arena* bootstrap_arena (Arena_Reserve new_reserve, s32 default_commit_page_count) {
|
||||
s64 commit_size = default_commit_page_count * PLATFORM_MEMORY_PAGE_SIZE;
|
||||
Assert(commit_size <= reserve_size(new_reserve));
|
||||
|
||||
Arena new_arena;
|
||||
arena_init(&new_arena, new_reserve, default_commit_page_count);
|
||||
Arena* arena_ptr = (Arena*)new_arena.memory_base;
|
||||
if (arena_ptr == nullptr) { Assert(false); // Assert in debug mode!
|
||||
return nullptr;
|
||||
}
|
||||
|
||||
memcpy(arena_ptr, &new_arena, sizeof(Arena));
|
||||
arena_ptr->current_point = arena_start(arena_ptr);
|
||||
|
||||
return arena_ptr;
|
||||
}
|
||||
|
||||
void arena_clear_flags (Arena* arena) { arena->flags = Arena_Flags::None; }
|
||||
void arena_set_bootstrap_flag (Arena* arena) { arena->flags |= Arena_Flags::Is_Bootstrapped; }
|
||||
// void arena_set_secure_flag (Arena* arena) { arena->flags |= Arena_Flags::Secure_Arena; }
|
||||
|
||||
void arena_reset_keeping_memory (Arena* arena) {
|
||||
if (!is_valid(arena)) return;
|
||||
arena->current_point = arena_start(arena);
|
||||
}
|
||||
|
||||
void arena_reset (Arena* arena) {
|
||||
if (!is_valid(arena)) return;
|
||||
|
||||
free_pages_down_to(arena, arena->initial_commit_page_count);
|
||||
}
|
||||
|
||||
void arena_reset_overwriting_memory (Arena* arena, Memory_Wipe_Function wipe_function) {
|
||||
if (!is_valid(arena)) return;
|
||||
Assert(wipe_function != nullptr);
|
||||
if (wipe_function == nullptr) return;
|
||||
wipe_function(arena_start(arena), (u64)(arena->current_point - arena_start(arena)));
|
||||
}
|
||||
|
||||
void* arena_alloc (Arena* arena, s64 byte_count) {
|
||||
Assert(arena != nullptr);
|
||||
|
||||
if (!arena->memory_base) { arena_init(arena, arena->reserve_size); }
|
||||
Assert(is_valid(arena));
|
||||
|
||||
u8* result = Align<u8*>(arena->current_point, arena->alignment);
|
||||
u8* result_end = result + byte_count;
|
||||
|
||||
if (result_end > arena->first_uncommitted_page) {
|
||||
if (result_end > arena_address_limit(arena)) {
|
||||
// #TODO: Log error here:
|
||||
Assert(false); // Failed to allocate because arena is full and cannot expand!
|
||||
} else {
|
||||
extend_committed_pages(arena, result_end);
|
||||
}
|
||||
}
|
||||
|
||||
arena->current_point = result_end;
|
||||
|
||||
return result;
|
||||
}
|
||||
|
||||
u8* arena_start (Arena* arena) {
|
||||
// This assumes every arena is bootstrapped, so there may be some
|
||||
// wastage/cache misses caused by this!
|
||||
return Align(arena->memory_base + sizeof(Arena), ARENA_DEFAULT_ALIGNMENT);
|
||||
}
|
||||
|
||||
u8* arena_address_limit (Arena* arena) {
|
||||
return (arena->memory_base + reserve_size(arena));
|
||||
}
|
||||
|
||||
bool is_valid (Arena* arena) {
|
||||
return (arena != nullptr) && (arena->memory_base != nullptr);
|
||||
}
|
||||
|
||||
s64 reserve_size (Arena* arena) {
|
||||
return reserve_size(arena->reserve_size);
|
||||
}
|
||||
|
||||
s64 reserve_size (Arena_Reserve ar) {
|
||||
switch (ar) {
|
||||
case Arena_Reserve::Size_64K: return KB(64);
|
||||
case Arena_Reserve::Size_2M: return MB(2);
|
||||
case Arena_Reserve::Size_64M: return MB(64);
|
||||
case Arena_Reserve::Size_2G: return GB(2);
|
||||
case Arena_Reserve::Size_64G: return GB(64);
|
||||
case Arena_Reserve::Size_2T: return TB(2);
|
||||
}
|
||||
|
||||
Assert(false); // This should not happen.
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
s64 arena_usage_bytes (Arena* arena) { return (s64)(arena->current_point - arena_start(arena)); }
|
||||
s64 arena_usage_committed_bytes (Arena* arena) { return (s64)(arena->first_uncommitted_page - arena->memory_base); }
|
||||
|
||||
// for arena details, I need to setup my string builder first.
|
||||
Allocator get_allocator(Arena* arena) {
|
||||
return { arena_allocator_proc, arena };
|
||||
}
|
||||
168
lib/Base/Arena.h
Normal file
168
lib/Base/Arena.h
Normal file
@ -0,0 +1,168 @@
|
||||
#pragma once
|
||||
|
||||
#include "Allocator.h"
|
||||
#include "Base.h"
|
||||
#include "Base_Thread_Context.h"
|
||||
|
||||
#if OS_WINDOWS
|
||||
constexpr u32 ARENA_DEFAULT_COMMIT_PAGE_COUNT = 16; // 16 * 4k page = 64kB
|
||||
constexpr s64 ARENA_DEFAULT_COMMIT_SIZE_BYTES = 65536;
|
||||
#endif
|
||||
|
||||
#define ARENA_DEBUG BUILD_DEBUG
|
||||
|
||||
enum class Arena_Reserve: u8 {
|
||||
Size_64K = 0, // these are used as indices in Arena_Table!
|
||||
Size_2M = 1,
|
||||
Size_64M = 2,
|
||||
Size_2G = 3,
|
||||
Size_64G = 4,
|
||||
Size_2T = 5
|
||||
};
|
||||
enum class Arena_Flags: u8 {
|
||||
None = 0,
|
||||
Chained = 0x01,
|
||||
Is_Bootstrapped = 0x02,
|
||||
Large_Pages = 0x40,
|
||||
// Secure_Arena = 0xF0 // #NOTE: Secure Arenas are not implemented here!
|
||||
};
|
||||
|
||||
force_inline Arena_Flags operator | (Arena_Flags a, Arena_Flags b) {
|
||||
return (Arena_Flags)(((u8)a) | ((u8)b));
|
||||
}
|
||||
|
||||
force_inline Arena_Flags operator & (Arena_Flags a, Arena_Flags b) {
|
||||
return (Arena_Flags)(((u8)a) & ((u8)b));
|
||||
}
|
||||
|
||||
force_inline Arena_Flags& operator |= (Arena_Flags& a, Arena_Flags b) {
|
||||
a = a | b;
|
||||
return a;
|
||||
}
|
||||
|
||||
force_inline Arena_Flags operator ~ (Arena_Flags a) {
|
||||
return (Arena_Flags)(~((u8)a));
|
||||
}
|
||||
|
||||
struct Arena {
|
||||
u8* current_point = nullptr;
|
||||
u8* memory_base = nullptr;
|
||||
u8* first_uncommitted_page = nullptr;
|
||||
u16 alignment = CPU_REGISTER_WIDTH_BYTES;
|
||||
Arena_Reserve reserve_size = Arena_Reserve::Size_64K;
|
||||
Arena_Flags flags = Arena_Flags::None;
|
||||
u32 initial_commit_page_count = ARENA_DEFAULT_COMMIT_PAGE_COUNT;
|
||||
};
|
||||
|
||||
typedef void* (*Memory_Wipe_Function)(void* memory, u64 byte_count);
|
||||
|
||||
void* arena_allocator_proc (Allocator_Mode mode, s64 requested_size, s64 old_size, void* old_memory, void* allocator_data);
|
||||
|
||||
// Interface API for normal use (idk how to explain - see Arena_Table.cpp)
|
||||
void initialize_arena_table ();
|
||||
Arena* next_arena (Arena_Reserve reserve_size);
|
||||
void release_arena (Arena* arena, bool delete_extra_pages=true);
|
||||
|
||||
// Main API
|
||||
Arena* bootstrap_arena (Arena_Reserve new_reserve, s32 default_commit_page_count);
|
||||
|
||||
void arena_init (Arena* arena, Arena_Reserve new_reserve, s32 default_commit_page_count=16); // For when we're *not* bootstrapping arenas: (I'm debating if we should keep this..)
|
||||
bool arena_commit_first_pages (Arena* arena, s64 commit_size, s64 start_offset=0); // This is useful for initializing arenas (arena_init), and for starting Arena-backed arrays.
|
||||
|
||||
void arena_clear_flags (Arena* arena);
|
||||
void arena_set_bootstrap_flag (Arena* arena);
|
||||
void arena_set_secure_flag (Arena* arena);
|
||||
|
||||
void arena_reset_keeping_memory (Arena* arena); // just sets current point to arena_start
|
||||
void arena_reset (Arena* arena); // frees excess pages
|
||||
void arena_reset_overwriting_memory (Arena* arena, Memory_Wipe_Function wipe_function);
|
||||
|
||||
// Internal API (should not be called directly by program)
|
||||
void* arena_alloc (Arena* arena, s64 byte_count);
|
||||
|
||||
// Utilties
|
||||
u8* arena_start (Arena* arena);
|
||||
u8* arena_address_limit (Arena* arena);
|
||||
s64 arena_usage_bytes (Arena* arena);
|
||||
s64 arena_usage_committed_bytes (Arena* arena);
|
||||
// arena_details (requires print)
|
||||
s64 reserve_size (Arena* arena);
|
||||
s64 reserve_size (Arena_Reserve ar);
|
||||
bool is_valid (Arena* arena);
|
||||
Allocator get_allocator(Arena* arena);
|
||||
|
||||
// Platform-Specific Implementations (forward-declared)
|
||||
void platform_init (Arena* arena, s64 new_reserve);
|
||||
void extend_committed_pages (Arena* arena, u8* end);
|
||||
void free_pages_down_to (Arena* arena, s64 pages_to_keep);
|
||||
void arena_delete (Arena* arena);
|
||||
|
||||
// Scoped Macros/Functions for auto_reset and auto_release
|
||||
// usage `Auto_Reset guard(arena);` within a scope.
|
||||
struct Auto_Reset {
|
||||
Arena* arena;
|
||||
u8* starting_point;
|
||||
|
||||
Auto_Reset(Arena* arena) {
|
||||
this->arena = arena;
|
||||
this->starting_point = arena->current_point;
|
||||
Assert(is_valid(arena));
|
||||
}
|
||||
|
||||
~Auto_Reset() {
|
||||
arena->current_point = starting_point;
|
||||
}
|
||||
};
|
||||
|
||||
struct Auto_Release {
|
||||
Arena* arena;
|
||||
u8* starting_point;
|
||||
|
||||
Auto_Release(Arena* arena) {
|
||||
this->arena = arena;
|
||||
this->starting_point = arena->current_point;
|
||||
Assert(is_valid(arena));
|
||||
}
|
||||
|
||||
~Auto_Release() {
|
||||
arena->current_point = starting_point;
|
||||
free_pages_down_to(arena, arena->initial_commit_page_count);
|
||||
}
|
||||
};
|
||||
|
||||
struct Push_Alignment {
|
||||
Arena* arena;
|
||||
u16 original_alignment;
|
||||
|
||||
Push_Alignment(Arena* arena, u16 alignment) {
|
||||
Assert(is_valid(arena));
|
||||
this->arena = arena;
|
||||
this->original_alignment = arena->alignment;
|
||||
this->arena->alignment = alignment;
|
||||
}
|
||||
|
||||
~Push_Alignment() {
|
||||
arena->alignment = original_alignment;
|
||||
}
|
||||
};
|
||||
|
||||
struct Push_Arena {
|
||||
Thread_Context* context;
|
||||
Allocator original_allocator;
|
||||
|
||||
Push_Arena(Arena* arena) {
|
||||
Assert(is_valid(arena));
|
||||
context = get_thread_context();
|
||||
Assert(context != nullptr);
|
||||
original_allocator = context->allocator;
|
||||
context->allocator = get_allocator(arena);
|
||||
}
|
||||
|
||||
~Push_Arena() {
|
||||
context->allocator = original_allocator;
|
||||
}
|
||||
};
|
||||
|
||||
// Do this later:
|
||||
// arena_lock_pages :: (using arena: *Arena, start_address: *u8, byte_count: s64)
|
||||
// arena_unlock_pages :: (using arena: *Arena, start_address: *u8, byte_count: s64)
|
||||
270
lib/Base/Arena_Array.h
Normal file
270
lib/Base/Arena_Array.h
Normal file
@ -0,0 +1,270 @@
|
||||
#pragma once
|
||||
|
||||
#include "Allocator.h"
|
||||
#include "Arena.h"
|
||||
|
||||
constexpr s64 ARRAY_ARENA_START_OFFSET = 64;
|
||||
|
||||
template <typename T>
|
||||
struct ArenaArray { // downcasts to an ArrayView.
|
||||
using ValueType = T;
|
||||
s64 count;
|
||||
T* data;
|
||||
s64 allocated;
|
||||
Arena* arena; // We can probably assume arena is &Array-32
|
||||
|
||||
T& operator[] (s64 index) {
|
||||
#if ARRAY_ENABLE_BOUNDS_CHECKING
|
||||
if (index < 0 || index >= count) { debug_break(); } // index out of bounds
|
||||
#endif
|
||||
|
||||
return static_cast<T*>(data)[index];
|
||||
}
|
||||
};
|
||||
|
||||
// #NOTE: I am not defining arena_array_init (ArenaArray<T>*), because I do not want to
|
||||
// encourage it's usage!
|
||||
// #TODO: array_free vs arena_array_destroy or arena_array_delete or
|
||||
template <typename T>
|
||||
ArenaArray<T>* arena_array_new (s64 preallocate_count, Arena_Reserve reserve_size) {
|
||||
Arena* arena = next_arena(reserve_size);
|
||||
Push_Arena push_arena(arena);
|
||||
Push_Alignment push_alignment(arena, 1);
|
||||
ArenaArray<T>* array = New<ArenaArray<T>>(true);
|
||||
array->arena = arena;
|
||||
|
||||
s64 commit_size_bytes = preallocate_count * sizeof(T);
|
||||
if (arena_commit_first_pages(array->arena, commit_size_bytes, ARRAY_ARENA_START_OFFSET)) {
|
||||
array->allocated = preallocate_count;
|
||||
}
|
||||
|
||||
array.count = 0;
|
||||
array.arena = new_arena;
|
||||
array.data = array_start(array);
|
||||
|
||||
return array;
|
||||
}
|
||||
|
||||
// Address of the first element: storage begins at a fixed offset past the
// arena base (the region reserved for Arena + ArenaArray bookkeeping).
template <typename T> T* array_start (ArenaArray<T>& array) {
    // `array` is a reference (use `.`); memory_base is u8*, so cast to T*.
    return (T*)(array.arena->memory_base + ARRAY_ARENA_START_OFFSET);
}
|
||||
|
||||
// True when the array holds no elements or its backing arena is unusable.
template <typename T> bool is_empty (ArenaArray<T>& array) {
    if (array.count == 0) return true;
    return !is_valid(array.arena);
}
|
||||
|
||||
// Committed byte count of the backing arena; 0 when there is no arena.
template <typename T> s64 memory_usage (ArenaArray<T>& array) {
    // Original compared the reference itself to nullptr, which does not
    // compile; the intent was clearly a null-arena guard.
    if (array.arena == nullptr) return 0;
    return arena_usage_committed_bytes(array.arena);
}
|
||||
|
||||
// Empties the array and returns its backing arena to the free table.
template <typename T> void array_free (ArenaArray<T>& array) {
    array.count = 0;
    array.allocated = 0;

    // C++ has no named arguments (`delete_extra_pages=true` was Jai syntax);
    // pass the flag positionally with an annotating comment instead.
    release_arena(array.arena, /*delete_extra_pages=*/true);
}
|
||||
|
||||
// Downcast helper: non-owning view of the whole array.
template <typename T> ArrayView<T> array_view (Array<T> array) {
    ArrayView<T> view;
    view.count = array.count;
    view.data = array.data;
    return view;
}
|
||||
|
||||
// Non-owning view over all elements of an ArenaArray.
template <typename T> ArrayView<T> to_view (ArenaArray<T>& array) {
    ArrayView<T> view;
    view.count = array.count;
    view.data = array.data;
    return view;
}
|
||||
|
||||
// View of up to `count` elements starting at `start_offset`, clamped to the
// array's current count. An out-of-range start yields an empty view.
template <typename T> ArrayView<T> to_view (ArenaArray<T>& array, s64 start_offset, s64 count) {
    Assert(start_offset >= 0); Assert(count >= 0);

    ArrayView<T> av = { 0, nullptr };

    if (start_offset >= array.count) {
        return av; // empty
    }

    av.count = count;
    av.data = array.data + start_offset;

    if (start_offset + count > array.count) {
        av.count = array.count - start_offset; // was undeclared `offset`
    }

    return av;
}
|
||||
|
||||
template <typename T> void array_add (ArenaArray<T>& array, T item) {
|
||||
maybe_grow(array);
|
||||
array.data[array.count] = item;
|
||||
array.count += 1;
|
||||
}
|
||||
|
||||
template <typename T> T* array_add (ArenaArray<T>& array) {
|
||||
maybe_grow(array);
|
||||
|
||||
T* result = &array.data[array.count];
|
||||
|
||||
(*result) = T();
|
||||
|
||||
array.count += 1;
|
||||
return result;
|
||||
}
|
||||
|
||||
// Doubles capacity once count reaches the allocated size. Growth never
// relocates: the arena simply commits more pages behind the existing data.
template <typename T> force_inline void maybe_grow (ArenaArray<T>& array) {
    if (array.count >= array.allocated) {
        s64 reserve = 2 * array.allocated;
        // No minimum bucket needed: allocation is page-granular and we never realloc.
        // Cast to the type-erased form, matching array_reserve/array_resize
        // (reserve_internal takes ArenaArray<void>&, so the bare reference
        // would not bind).
        reserve_internal((ArenaArray<void>&)array, reserve, sizeof(T));
    }
}
|
||||
|
||||
// Ensures capacity for at least `desired_item_count` elements.
// The cast to ArenaArray<void>& relies on every ArenaArray<T> instantiation
// sharing the same field layout (count/data/allocated/arena).
template <typename T> force_inline void array_reserve (ArenaArray<T>& array, s64 desired_item_count) {
    reserve_internal((ArenaArray<void>&)array, desired_item_count, sizeof(T));
}
|
||||
|
||||
// Sets count to `desired_item_count`, growing capacity as needed.
// Newly exposed elements are default-constructed when `initialize` is true;
// shrinking leaves the (now out-of-range) elements untouched.
template <typename T> void array_resize (ArenaArray<T>& array, s64 desired_item_count, bool initialize) {
    s64 old_count = array.count;
    reserve_internal((ArenaArray<void>&)array, desired_item_count, sizeof(T));
    array.count = desired_item_count;
    if (initialize) {
        // No-op when shrinking (old_count >= desired_item_count).
        init_range(array.data, old_count, desired_item_count);
    }
}
|
||||
|
||||
void reserve_internal (ArenaArray<void>& array, s64 desired_item_count, s64 element_size) {
|
||||
if (desired_item_count <= array.allocated) return;
|
||||
|
||||
array_arena_realloc(array, desired_item_count * element_size, array.allocated * element_size);
|
||||
|
||||
array.allocated = desired_item_count;
|
||||
}
|
||||
|
||||
// Upper bound on element storage: the arena's reserved range minus the
// header bookkeeping (Arena + ArenaArray) that lives at the front.
// `inline`: header-defined function, keeps the ODR intact across TUs.
inline s64 max_array_size (ArenaArray<void>& array) {
    return reserve_size(array.arena) - sizeof(Arena) - sizeof(ArenaArray<void>);
}
|
||||
|
||||
// Grows the committed region of the array's backing arena so that `new_size`
// bytes of element storage are usable. `old_size` is currently unused
// (arena-backed arrays never relocate) but kept for interface symmetry.
// `inline`: header-defined function, keeps the ODR intact across TUs.
inline void array_arena_realloc (ArenaArray<void>& array, s64 new_size, s64 old_size) {
    Assert(new_size <= max_array_size(array));

    // u8* instead of void*: pointer arithmetic on void* is non-standard.
    u8* array_begin = (u8*)array.data;
    u8* result_end = array_begin + new_size;

    // Check if we need more pages:
    if (result_end > array.arena->first_uncommitted_page) { // arena is a pointer: `->`
        // Critical error if we run out of address space!
        if (result_end > address_limit(array.arena)) {
            // #TODO Log error.
            Assert(false); // Failed to allocate because Arena is full and cannot expand
            return;
        }

        extend_committed_pages(array.arena, result_end);
    }
}
|
||||
|
||||
// Assigns a default-constructed T into each element of [start_offset, end_offset).
// Note this assigns into existing storage rather than placement-constructing.
template <typename T> void init_range (T* ptr, s64 start_offset, s64 end_offset) {
    for (T* item = ptr + start_offset; item < ptr + end_offset; ++item) {
        *item = T();
    }
}
|
||||
|
||||
// Discards all elements and resets the backing arena.
// NOTE(review): array.data is left pointing at the old storage while
// allocated drops to 0 -- confirm callers re-grow before the next access.
template <typename T> force_inline void array_reset (ArenaArray<T>& array) {
    // reset backing array:
    arena_reset(array.arena);
    array.count = 0;
    array.allocated = 0;
}
|
||||
|
||||
// Logically empties the array; capacity and committed pages are retained so
// subsequent adds reuse the existing storage.
template <typename T> force_inline void reset_keeping_memory (ArenaArray<T>& array) {
    array.count = 0;
}
|
||||
|
||||
// Removes the element at `index`, shifting later elements down one slot so
// relative order is preserved (O(n)).
template <typename T> force_inline void ordered_remove_by_index (ArenaArray<T>& array, s64 index) {
    Assert(index >= 0); Assert(index < array.count);

    for (s64 i = index; i < array.count-1; i += 1) {
        // Shift DOWN from above: original read data[i - 1], which shifted the
        // wrong direction and read out of bounds when index == 0.
        array.data[i] = array.data[i + 1];
    }

    array.count -= 1;
}
|
||||
|
||||
// Removes the element at `index` in O(1) by swapping the last element into
// its slot. Does NOT preserve element order.
template <typename T> force_inline void unordered_remove_by_index (ArenaArray<T>& array, s64 index) {
    Assert(index >= 0); Assert(index < array.count);

    s64 last_index = array.count - 1;
    // Parenthesized condition (original used Jai-style `if index != last_index`).
    if (index != last_index) { array.data[index] = array.data[last_index]; }

    array.count -= 1;
}
|
||||
|
||||
// Inserts the contents of `src_array` into `array` at `offset`, shifting the
// tail upward. Grows the destination as needed.
template <typename T> void insert_at(ArenaArray<T>& array, s64 offset, ArenaArray<T>& src_array) {
    Assert(offset >= 0 && offset <= array.count);
    if (!is_valid(src_array)) return;

    s64 new_count = array.count + src_array.count;
    array_reserve(array, new_count);

    // Shift the tail up. Source and destination overlap, so memmove is
    // required -- memcpy on overlapping ranges is undefined behavior.
    T* src = array.data + offset;
    T* dst = src + src_array.count;
    memmove(dst, src, (array.count - offset) * sizeof(T));

    memcpy(array.data + offset, src_array.data, src_array.count * sizeof(T));

    // Original never updated count, so the inserted elements were invisible.
    array.count = new_count;
}
|
||||
|
||||
// Removes and returns the last element. Caller must ensure the array is non-empty.
template <typename T> T pop (ArenaArray<T>& array) {
    array.count -= 1;
    return array.data[array.count];
}
|
||||
|
||||
// Returns a copy of the last element without removing it (non-empty arrays only).
template <typename T> T peek (ArenaArray<T>& array) {
    s64 last = array.count - 1;
    return array.data[last];
}
|
||||
|
||||
// Pointer to the last element (non-empty arrays only).
template <typename T> T* peek_pointer (ArenaArray<T>& array) {
    return array.data + (array.count - 1);
}
|
||||
|
||||
// Removes `count` elements starting at `start_offset`, shifting the tail down
// to close the gap (order preserving).
template <typename T> void delete_range (ArenaArray<T>& array, s64 start_offset, s64 count) {
    Assert(start_offset >= 0 && count >= 0 && start_offset + count <= array.count);
    // Source and destination overlap whenever count < tail length, so use
    // memmove -- memcpy on overlapping ranges is undefined behavior.
    memmove(array.data + start_offset, array.data + start_offset + count, (array.count - start_offset - count) * sizeof(T));
    array.count -= count;
}
|
||||
|
||||
// Deep copy: allocates a new ArenaArray (with its own backing arena of the
// same reserve class) and copies the elements over.
template <typename T> ArenaArray<T>& array_copy (ArenaArray<T>& array) {
    // arena_array_new takes (preallocate_count, reserve_size) and returns a
    // pointer; the original passed one argument and mixed up . vs -> access.
    ArenaArray<T>* new_array = arena_array_new<T>(array.count, array.arena->reserve_size);
    array_reserve(*new_array, array.count);
    memcpy(new_array->data, array.data, array.count * sizeof(T));
    new_array->count = array.count;
    return *new_array;
}
|
||||
|
||||
// NOTE(review): despite the name, this does NOT copy -- it returns a view
// aliasing the array's storage. Confirm intent; consider renaming or using
// array_copy + to_view for a real copy.
template <typename T> ArrayView<T> array_copy_as_view (ArenaArray<T>& array) {
    ArrayView<T> view = { array.count, array.data };
    return view;
}
|
||||
|
||||
// [x] initialize_range
|
||||
// [x] max_array_size
|
||||
// [x] maybe_grow
|
||||
// [x] resize
|
||||
// [x] reserve
|
||||
// [x] reserve_internal
|
||||
// [x] array_arena_realloc
|
||||
// [x] reset_keeping_memory
|
||||
// [x] ordered_remove_by_index
|
||||
// [x] unordered_remove_by_index
|
||||
// [x] insert_at
|
||||
// [x] pop
|
||||
// [x] peek
|
||||
// [x] peek_pointer
|
||||
// [x] delete_range
|
||||
// [x] array_copy
|
||||
|
||||
59
lib/Base/Arena_Table.cpp
Normal file
59
lib/Base/Arena_Table.cpp
Normal file
@ -0,0 +1,59 @@
|
||||
// API in Arena.h
|
||||
#include "Arena.h"
|
||||
#include "Array.h"
|
||||
#include <mutex>
|
||||
|
||||
global std::mutex arena_table_mutex;
|
||||
global s32 arenas_in_flight_count[6];
|
||||
global Array<Arena*> arena_free_table[6];
|
||||
#if BUILD_DEBUG
|
||||
global Array<Arena*> arenas_in_flight[6];
|
||||
#endif
|
||||
|
||||
void initialize_arena_table () {
|
||||
for (s32 i = 0; i < 6; i += 1) {
|
||||
array_reserve(arena_free_table[i], 64);
|
||||
array_reserve(arenas_in_flight[i], 64);
|
||||
}
|
||||
}
|
||||
|
||||
// Hands out an arena of the requested reserve class, reusing a previously
// released arena when one is available and bootstrapping a fresh one
// otherwise. Thread-safe via arena_table_mutex.
Arena* next_arena (Arena_Reserve reserve_size) {
    Arena* arena;
    std::lock_guard<std::mutex> lock(arena_table_mutex);
    // Arena_Reserve doubles as the index into the per-size tables.
    s64 reserve_index = (s64)reserve_size;
    arenas_in_flight_count[reserve_index] += 1;
    if (!arena_free_table[reserve_index].count) {
        arena = bootstrap_arena(reserve_size, ARENA_DEFAULT_COMMIT_PAGE_COUNT);
    } else {
        arena = pop(arena_free_table[reserve_index]);
    }
#if ARENA_DEBUG
    // NOTE(review): arenas_in_flight is declared under BUILD_DEBUG but used
    // under ARENA_DEBUG -- confirm the two flags always agree.
    array_add(arenas_in_flight[reserve_index], arena);
#endif

    Assert(arena != nullptr);

    return arena;
}
|
||||
|
||||
// Returns an arena to the free table for reuse by next_arena. The arena is
// reset (contents discarded); optionally its extra committed pages are
// returned to the OS as well. Thread-safe via arena_table_mutex.
void release_arena (Arena* arena, bool delete_extra_pages) {
    std::lock_guard<std::mutex> lock(arena_table_mutex);
    s64 reserve_index = (s64)arena->reserve_size;
#if ARENA_DEBUG
    // NOTE(review): arenas_in_flight is declared under BUILD_DEBUG but used
    // under ARENA_DEBUG -- confirm the two flags always agree.
    array_unordered_remove_by_value(arenas_in_flight[reserve_index], arena, 1);
#endif
    arena_reset_keeping_memory(arena);
    if (delete_extra_pages) {
        // Shrink back to the initial commit so pooled arenas stay small.
        free_pages_down_to(arena, arena->initial_commit_page_count);
    }
    array_add(arena_free_table[reserve_index], arena);

    arenas_in_flight_count[reserve_index] -= 1;

    // #TODO: Garbage collection if we have >> 64 in a particular table for a while.
    //
    if (arena_free_table[reserve_index].count > 64) {
        // release some arenas if required
        // arena_delete(...)
    }
}
|
||||
67
lib/Base/Arena_Windows.cpp
Normal file
67
lib/Base/Arena_Windows.cpp
Normal file
@ -0,0 +1,67 @@
|
||||
#if OS_WINDOWS
|
||||
|
||||
// This file is included in Arena.cpp, so we don't need to include headers here.
|
||||
|
||||
// Reserves (but does not commit) the arena's address range via VirtualAlloc.
// On failure, arena->memory_base is left untouched.
void platform_init (Arena* arena, s64 new_reserve) {
    s64 page_aligned_reserve_size = Align_To_Page_Size(new_reserve);

    void* address_start =
        VirtualAlloc(nullptr, (u64)page_aligned_reserve_size, MEM_RESERVE, PAGE_READWRITE);

    if (address_start == nullptr) {
        // get error value and string?
        s32 error_code = GetLastError(); // currently unused; kept for the planned log call below
        return;
        // #TODO(LOG) log_error("In Arena:platform_init, VirtualAlloc failed with code %d\n", error_code)
    }

    arena->memory_base = (u8*)address_start;
}
|
||||
|
||||
// Commits pages so the arena's committed region reaches at least `end`.
// `end` must not precede the current commit watermark.
void extend_committed_pages (Arena* arena, u8* end) {
    s64 delta = end - arena->first_uncommitted_page;
    Assert(delta >= 0);

    s64 size = Align_To_Page_Size(delta);

    // NOTE(review): the VirtualAlloc return value is unchecked -- commit can
    // fail (e.g. pagefile exhaustion); confirm callers can tolerate that.
    VirtualAlloc(arena->first_uncommitted_page, (u64)size, MEM_COMMIT, PAGE_READWRITE);

    // arena_lock_pages(arena, arena->first_uncommitted_page, size);
    arena->first_uncommitted_page += size;
}
|
||||
|
||||
// Decommits committed pages beyond the first `pages_to_keep` pages, returning
// the memory to the OS while keeping the address reservation intact.
void free_pages_down_to (Arena* arena, s64 pages_to_keep) {
    if (arena == nullptr) return;
    Assert(pages_to_keep >= 0);
    s64 bytes_to_keep = pages_to_keep * PLATFORM_MEMORY_PAGE_SIZE;
    if (bytes_to_keep >= reserve_size(arena)) {
        Assert(false); // Break in debug builds, but release we just do nothing.
        return;
    }

    u64 bytes_committed = (u64)(arena->first_uncommitted_page - arena->memory_base);
    // ">=" matters here: at equality there is nothing to decommit, and falling
    // through would call VirtualFree with dwSize == 0 + MEM_DECOMMIT, which
    // decommits the ENTIRE committed region instead of nothing.
    if (bytes_to_keep >= (s64)bytes_committed) {
        return; // nothing to free
    }

    u8* start_address = arena->memory_base + bytes_to_keep;
    u64 bytes_to_decommit = (u64)(arena->first_uncommitted_page - start_address);
    // arena_unlock_pages(arena, start_address, (s64)bytes_to_decommit);

    VirtualFree(start_address, bytes_to_decommit, MEM_DECOMMIT);

    arena->first_uncommitted_page = start_address;
}
|
||||
|
||||
// Releases the arena's entire reserved range back to the OS.
// For a bootstrapped arena the Arena struct itself lives inside that range,
// so it must not be written after VirtualFree -- hence the flag check.
void arena_delete (Arena* arena) {
    if (!is_valid(arena)) return;
    bool arena_was_boostrapped = (arena->flags & Arena_Flags::Is_Bootstrapped) == Arena_Flags::Is_Bootstrapped;

    VirtualFree(arena->memory_base, 0, MEM_RELEASE);

    if (!arena_was_boostrapped) {
        // Safe to write: this struct is stored outside the freed range.
        arena->memory_base = nullptr;
    }
}
|
||||
|
||||
#endif
|
||||
367
lib/Base/Array.h
Normal file
367
lib/Base/Array.h
Normal file
@ -0,0 +1,367 @@
|
||||
// Strongly influenced by Array.jai in Basic module.
|
||||
#pragma once
|
||||
|
||||
#include "Base.h"
|
||||
#include "Allocator.h"
|
||||
|
||||
#define DEFAULT_ARRAY_ALIGNMENT 16
|
||||
|
||||
// #NOTE: This uses `General_Purpose_Allocator` for simplicity.
|
||||
// For Arena-Backed arrays use ArenaArray
|
||||
|
||||
MSVC_RUNTIME_CHECKS_OFF
|
||||
|
||||
// Growable heap array backed by the general-purpose allocator.
// Layout-compatible prefix with ArrayView (count, data) so it can downcast.
template <typename T>
struct Array { // downcasts to an ArrayView.
    using ValueType = T;
    s64 count;      // number of live elements
    T* data;        // element storage (GPAllocator)
    s64 allocated;  // capacity in elements
    s64 alignment = DEFAULT_ARRAY_ALIGNMENT;

    // Zeroed, unallocated array.
    Array() {
        memset(this, 0, sizeof(*this));
        alignment = DEFAULT_ARRAY_ALIGNMENT;
    }

    // Allocates `new_count` elements at the given alignment.
    Array(s64 new_count, s64 _alignment, bool zero_memory=true) {
        count = new_count;
        data = (T*)GPAllocator_New(new_count * sizeof(T), _alignment);
        if (zero_memory) { memset(data, 0, new_count * sizeof(T)); }
        alignment = _alignment;
        allocated = new_count;
    }

    // Use constructor delegation to pass params to above constructor
    Array(s64 new_count, bool zero_memory=true)
        : Array(new_count, DEFAULT_ARRAY_ALIGNMENT, zero_memory) {}

    // initializer-list type instantiation: `Array<T> new_array = {count, data}`
    // This is essentially an arrayview.
    // (Musa) Ok, but this array cannot then be resized.
    Array(s64 new_count, void* new_data) {
        count = new_count;
        data = (T*)new_data;

        allocated = new_count;
        alignment = DEFAULT_ARRAY_ALIGNMENT;
    }

    // Used by array_zero, array_copy, etc.
    Array(s64 new_count, void* new_data, s64 _allocated, s64 _alignment) {
        count = new_count; data = (T*)new_data; allocated = _allocated; alignment = _alignment;
    }

    // Bounds-checked (debug builds) element access.
    T& operator[](s64 index) {
#if ARRAY_ENABLE_BOUNDS_CHECKING
        if (index < 0 || index >= count) { debug_break(); } // index out of bounds
#endif
        return static_cast<T*>(data)[index];
    }
};
|
||||
|
||||
template <typename T>
|
||||
bool is_valid(Array<T> src) {
|
||||
if (src.count == 0) return true;
|
||||
if (src.count < 0) return false;
|
||||
if (src.data == nullptr) return false;
|
||||
if (src.allocated < src.count) return false;
|
||||
// if ((src.alignment % 8) != 0) return false; Dubious - we could want an alignment of 1
|
||||
return true;
|
||||
}
|
||||
|
||||
template <typename T>
|
||||
Array<T> array_copy_zero(const Array<T>& src) {
|
||||
if (!src.data || src.count == 0) {
|
||||
return Array<T>(); // Return an empty array
|
||||
}
|
||||
|
||||
void* new_data = GPAllocator_New(src.count * sizeof(T), src.alignment);
|
||||
memset(new_data, 0, src.count * sizeof(T));
|
||||
|
||||
return Array<T>(src.count, new_data, src.allocated, src.alignment);
|
||||
}
|
||||
|
||||
template <typename T>
|
||||
Array<T> array_copy(const Array<T>& src) {
|
||||
if (!src.data || src.count == 0) {
|
||||
return Array<T>(); // Return an empty array
|
||||
}
|
||||
|
||||
void* new_data = GPAllocator_New(src.count * sizeof(T), src.alignment);
|
||||
memcpy(new_data, src.data, src.count * sizeof(T));
|
||||
|
||||
return Array<T>(src.count, new_data, src.allocated, src.alignment);
|
||||
}
|
||||
|
||||
template <typename T>
|
||||
void array_reset_count(Array<T>& src) {
|
||||
src.count = 0;
|
||||
}
|
||||
|
||||
template <typename T>
|
||||
void array_free(Array<T>& src) {
|
||||
GPAllocator_Delete(src.data);
|
||||
src.count = 0;
|
||||
src.data = nullptr;
|
||||
src.allocated = 0;
|
||||
}
|
||||
|
||||
template <typename T>
|
||||
void array_initialize(Array<T>& src, s64 start, s64 end) {
|
||||
for (s64 i = start; i < end; i += 1) {
|
||||
// Really this can be one ini followed by a bunch of memcpy.
|
||||
// For long arrays we could power-of-two double the copy out, etc.
|
||||
src[i] = T(); // `new (&src[i]) T();` also works.
|
||||
}
|
||||
}
|
||||
|
||||
template <typename T>
|
||||
void array_reserve(Array<T>& src, s64 desired_items) {
|
||||
if (desired_items <= src.allocated) return;
|
||||
|
||||
src.data = (T*)GPAllocator_Resize(src.allocated * sizeof(T), src.data, desired_items * sizeof(T), src.alignment);
|
||||
Assert(src.data != nullptr);
|
||||
|
||||
src.allocated = desired_items;
|
||||
}
|
||||
|
||||
template <typename T>
|
||||
void array_resize(Array<T>& src, s64 new_count, bool initialize=true) {
|
||||
if (src.count == new_count) return;
|
||||
|
||||
s64 old_count = src.count;
|
||||
|
||||
array_reserve(src, new_count);
|
||||
src.count = new_count;
|
||||
|
||||
if (initialize) { array_initialize(src, old_count, new_count); }
|
||||
}
|
||||
|
||||
template <typename T>
|
||||
force_inline void array_maybe_grow(Array<T>& src) {
|
||||
if (src.count >= src.allocated) {
|
||||
// Replace with Basic.max(8, 2 * src.count).
|
||||
s64 reserve = 8;
|
||||
if (src.count * 2 > reserve) { reserve = src.count * 2; }
|
||||
array_reserve(src, reserve);
|
||||
}
|
||||
}
|
||||
|
||||
template <typename T>
|
||||
T pop(Array<T>& src) {
|
||||
auto result = src[src.count-1]; // how do I dereference?
|
||||
src.count -= 1;
|
||||
return result;
|
||||
}
|
||||
|
||||
template <typename T, typename U>
|
||||
void array_add(Array<T>& src, U new_item) {
|
||||
static_assert(sizeof(U) <= sizeof(T));
|
||||
auto new_count = src.count + 1;
|
||||
array_maybe_grow(src);
|
||||
|
||||
T new_item_casted = (T)new_item;
|
||||
|
||||
src.count += 1;
|
||||
memcpy(&src[src.count-1], &new_item_casted, sizeof(T));
|
||||
}
|
||||
|
||||
template <typename T>
|
||||
void array_add(Array<T>& src, T new_item) {
|
||||
auto new_count = src.count + 1;
|
||||
array_maybe_grow(src);
|
||||
|
||||
src.count += 1;
|
||||
memcpy(&src[src.count-1], &new_item, sizeof(T));
|
||||
}
|
||||
|
||||
template <typename T>
|
||||
s64 array_find(Array<T>& src, T item) {
|
||||
ForArray(i, src) {
|
||||
if (src[i] == item) return i;
|
||||
}
|
||||
return -1;
|
||||
}
|
||||
|
||||
template <typename T>
|
||||
void array_ordered_remove_by_index(Array<T>& src, s64 index) {
|
||||
Assert(index >= 0); Assert(index < src.count);
|
||||
|
||||
for (s64 i = index; i < src.count-1; i += 1) {
|
||||
src[i] = src[i + 1];
|
||||
}
|
||||
|
||||
src.count -= 1;
|
||||
}
|
||||
|
||||
template <typename T>
|
||||
void array_ordered_remove_by_value(Array<T>& src, T item) {
|
||||
auto index = array_find(src, item);
|
||||
if (index != -1) { array_ordered_remove_by_index(src, index); }
|
||||
}
|
||||
|
||||
template <typename T>
|
||||
void array_unordered_remove_by_index(Array<T>& src, s64 index) {
|
||||
Assert(index >= 0); Assert(index < src.count);
|
||||
|
||||
auto last_index = src.count - 1;
|
||||
|
||||
if (index != last_index) { // Copy back item:
|
||||
memcpy(&src[index], &src[last_index], sizeof(T));
|
||||
}
|
||||
|
||||
src.count -= 1;
|
||||
}
|
||||
|
||||
template <typename T>
|
||||
s64 array_unordered_remove_by_value(Array<T>& src, T item, s64 max_count_to_remove) {
|
||||
s64 removed_count = 0;
|
||||
|
||||
for (s64 i = 0; i < src.count; i += 1) {
|
||||
if (src[i] == item) {
|
||||
removed_count += 1;
|
||||
array_unordered_remove_by_index(src, i);
|
||||
debug_break(); // haven't quite figured this one out yet.
|
||||
i -= 1; // check this element again??
|
||||
if (max_count_to_remove == removed_count) { break; }
|
||||
}
|
||||
}
|
||||
|
||||
return removed_count;
|
||||
}
|
||||
|
||||
// Non-owning (usually) view over contiguous elements. NOTE: the allocating
// constructor below does create owned storage -- pair it with array_free.
template <typename T>
struct ArrayView {
    using ValueType = T;
    s64 count;  // number of elements visible through the view
    T* data;    // borrowed (or, for the allocating ctor, owned) storage

    ArrayView() { count = 0; data = nullptr; }

    // If we don't need reallocation or alignments
    ArrayView(s64 new_count, s64 alignment=DEFAULT_ARRAY_ALIGNMENT, bool zero_memory=true) {
        count = new_count;
        data = (T*)GPAllocator_New(new_count * sizeof(T), alignment);
        if (zero_memory) { memset(data, 0, new_count * sizeof(T)); }
    }

    // #Note: use array_view to create slices or to downcast to ArrayView!
    ArrayView(s64 _count, T* _data) {
        count = _count;
        data = _data;
    }

    // Bounds-checked (debug builds) element access.
    T& operator[](s64 index) {
#if ARRAY_ENABLE_BOUNDS_CHECKING
        if (index < 0 || index >= count) { debug_break(); } // index out of bounds
#endif
        return static_cast<T*>(data)[index];
    }
};
|
||||
|
||||
template <typename T>
|
||||
bool is_zero(ArrayView<T> src) {
|
||||
if (src.count == 0) return true;
|
||||
return false;
|
||||
}
|
||||
|
||||
// #NOTE: procedures should be robust to arrays with count of zero!
|
||||
// Whether or not this is an error is procedure specific, but for most
|
||||
// things, there is a default behavior that is expected.
|
||||
template <typename T>
|
||||
bool is_valid(ArrayView<T> src) {
|
||||
if (src.count < 0) return false;
|
||||
if (src.count == 0) return true;
|
||||
if (src.data == nullptr) return false;
|
||||
// #TODO: For debug builds we can use VirtualQuery to check if
|
||||
// all pages are writable, but that seems excessive for now.
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
template <typename T>
|
||||
ArrayView<T> array_view(s64 view_count, T* view_data) {
|
||||
ArrayView<T> av;
|
||||
av.count = view_count;
|
||||
av.data = view_data;
|
||||
return av;
|
||||
} // #unsafe, no abc
|
||||
|
||||
template <typename T>
|
||||
ArrayView<T> array_view(Array<T> array) {
|
||||
ArrayView<T> av;
|
||||
av.count = array.count;
|
||||
av.data = array.data;
|
||||
return av;
|
||||
}
|
||||
|
||||
template <typename T>
|
||||
ArrayView<T> array_view(ArrayView<T> array, s64 start_index, s64 view_count) {
|
||||
ArrayView<T> av;
|
||||
av.count = view_count; // check if count exceeds
|
||||
Assert(start_index + view_count <= array.count);
|
||||
av.data = &array[start_index];
|
||||
return av;
|
||||
}
|
||||
|
||||
template <typename T>
|
||||
ArrayView<T> array_view(Array<T> array, s64 start_index, s64 view_count) {
|
||||
ArrayView<T> av;
|
||||
av.count = view_count; // check if count exceeds
|
||||
Assert(start_index + view_count <= array.count);
|
||||
av.data = &array[start_index];
|
||||
return av;
|
||||
}
|
||||
|
||||
template <typename T>
|
||||
void array_reset_count(ArrayView<T>& src) {
|
||||
src.count = 0;
|
||||
}
|
||||
|
||||
|
||||
template <typename T>
|
||||
ArrayView<T> array_copy(const ArrayView<T>& src) {
|
||||
if (!src.data || src.count == 0) {
|
||||
return ArrayView<T>(); // Return an empty array
|
||||
}
|
||||
|
||||
void* new_data = GPAllocator_New(src.count * sizeof(T), DEFAULT_ARRAY_ALIGNMENT);
|
||||
memcpy(new_data, src.data, src.count * sizeof(T));
|
||||
|
||||
return ArrayView<T>(src.count, (T*)new_data);
|
||||
}
|
||||
|
||||
template <typename T>
|
||||
void array_free(ArrayView<T>& src) {
|
||||
if (!src.data || src.count == 0) { return; }
|
||||
GPAllocator_Delete(src.data);
|
||||
src.count = 0;
|
||||
src.data = nullptr;
|
||||
}
|
||||
|
||||
template <typename T, typename... ArgValues>
|
||||
Array<T> NewArrayFromValues(ArgValues... args) {
|
||||
constexpr s64 N = sizeof...(ArgValues);
|
||||
auto array = Array<T>(N, /*initialize:*/false);
|
||||
T values[] = {args...};
|
||||
for (s64 i = 0; i < N; i += 1) {
|
||||
array[i] = values[i];
|
||||
}
|
||||
|
||||
return array;
|
||||
}
|
||||
|
||||
template <typename T, typename... ArgValues>
|
||||
ArrayView<T> NewArrayViewFromValues(ArgValues... args) {
|
||||
constexpr s64 N = sizeof...(ArgValues);
|
||||
auto array = ArrayView<T>(N, /*initialize:*/false);
|
||||
T values[] = {args...};
|
||||
for (s64 i = 0; i < N; i += 1) {
|
||||
array[i] = values[i];
|
||||
}
|
||||
|
||||
return array;
|
||||
}
|
||||
|
||||
MSVC_RUNTIME_CHECKS_RESTORE
|
||||
28
lib/Base/Base.cpp
Normal file
28
lib/Base/Base.cpp
Normal file
@ -0,0 +1,28 @@
|
||||
#include "Base.h"
|
||||
|
||||
// #TODO Split these up into platform_base (e.g. Windows_Base, Unix_Base...)
|
||||
#if OS_WINDOWS
|
||||
#include <intrin.h>
|
||||
|
||||
// Queries the CPU's base frequency (in MHz) via CPUID leaf 0x16.
// NOTE(review): on CPUs that do not implement leaf 0x16 the result is not
// guaranteed to be 0 -- confirm callers handle unsupported hardware.
int CPU_Base_Frequency() {
    int cpuInfo[4] = {0};

    // Call CPUID with EAX = 0x16 (Base CPU Frequency)
    __cpuid(cpuInfo, 0x16);

    // EAX (cpuInfo[0]) holds the base frequency.
    return cpuInfo[0];
}
|
||||
#endif
|
||||
|
||||
#if OS_IS_UNIX
|
||||
#include <cpuid.h>
|
||||
|
||||
// Queries the CPU's base frequency (in MHz) via CPUID leaf 0x16.
// __get_cpuid reports whether the leaf is available, so unsupported
// hardware falls through to the 0 return.
int CPU_Base_Frequency() {
    unsigned int eax, ebx, ecx, edx;
    if (__get_cpuid(0x16, &eax, &ebx, &ecx, &edx)) {
        return eax;
    }

    return 0; // not found or supported
}
||||
#endif
|
||||
176
lib/Base/Base.h
Normal file
176
lib/Base/Base.h
Normal file
@ -0,0 +1,176 @@
|
||||
#pragma once
|
||||
|
||||
#include "meta_generated.h"
|
||||
|
||||
#define LANG_CPP 1
|
||||
#define BUILD_CONSOLE_INTERFACE BUILD_DEBUG
|
||||
|
||||
#if ARCH_CPU_X64
|
||||
#define PLATFORM_MEMORY_PAGE_SIZE 4096
|
||||
#define PLATFORM_MEMORY_LARGE_PAGE_SIZE 2097152
|
||||
#define CPU_REGISTER_WIDTH_BYTES 8
|
||||
#define CPU_CACHE_LINE_SIZE 64
|
||||
#else
|
||||
#error "CPU not supported (yet)!"
|
||||
#endif
|
||||
|
||||
#if OS_WINDOWS
|
||||
#define WIN32_LEAN_AND_MEAN
|
||||
#include <Windows.h>
|
||||
#else
|
||||
#error "This configuration is NOT supported. Only Windows with MSVC is currently supported."
|
||||
#endif
|
||||
|
||||
#ifndef PROTOTYPING_API
|
||||
#ifdef OS_WINDOWS
|
||||
#define PROTOTYPING_API extern "C" __declspec(dllexport)
|
||||
#else
|
||||
#define PROTOTYPING_API
|
||||
#endif
|
||||
#endif // #ifndef PROTOTYPING_API
|
||||
|
||||
#define C_API
|
||||
#define TEMPORARY_API
|
||||
#define DEPRECATED_API
|
||||
|
||||
#include <string.h>
|
||||
#include <stdint.h>
|
||||
|
||||
// Primitive types.
|
||||
typedef uint8_t u8;
|
||||
typedef uint16_t u16;
|
||||
typedef uint32_t u32;
|
||||
typedef uint64_t u64;
|
||||
typedef int8_t s8;
|
||||
typedef int16_t s16;
|
||||
typedef int32_t s32;
|
||||
typedef int64_t s64;
|
||||
// typedef bool b8; // just use bool for b8s
|
||||
typedef s16 b16;
|
||||
typedef s32 b32;
|
||||
typedef s64 b64;
|
||||
typedef float f32;
|
||||
typedef double f64;
|
||||
|
||||
// Units
|
||||
#define KB(n) (((s64)(n)) << 10)
|
||||
#define MB(n) (((s64)(n)) << 20)
|
||||
#define GB(n) (((s64)(n)) << 30)
|
||||
#define TB(n) (((s64)(n)) << 40)
|
||||
#define Thousand(n) ((n)*1000)
|
||||
#define Million(n) ((n)*1000000)
|
||||
#define Billion(n) ((n)*1000000000)
|
||||
|
||||
#define internal static
|
||||
#define global static
|
||||
#define local_persist static // I don't like these, so I generally won't use them!
|
||||
|
||||
#if COMPILER_MSVC
|
||||
# define thread_static __declspec(thread)
|
||||
#elif COMPILER_CLANG || COMPILER_GCC
|
||||
# define thread_static __thread
|
||||
#else
|
||||
# error thread_static not defined for this compiler.
|
||||
#endif
|
||||
|
||||
#if COMPILER_MSVC || (COMPILER_CLANG && OS_WINDOWS)
|
||||
# pragma section(".rdata$", read)
|
||||
# define read_only __declspec(allocate(".rdata$"))
|
||||
#elif (COMPILER_CLANG && OS_LINUX)
|
||||
# define read_only __attribute__((section(".rodata")))
|
||||
#else
|
||||
#endif
|
||||
|
||||
#if COMPILER_MSVC
|
||||
# define force_inline __forceinline
|
||||
#elif COMPILER_CLANG || COMPILER_GCC
|
||||
# define force_inline __attribute__((always_inline))
|
||||
#else
|
||||
# error force_inline not defined for this compiler.
|
||||
#endif
|
||||
|
||||
// Maybe move to a different file.
|
||||
// Rounds n up to the next multiple of the platform page size (a power of two).
force_inline s64 Align_To_Page_Size(s64 n) {
    const s64 mask = PLATFORM_MEMORY_PAGE_SIZE - 1;
    return (n + mask) & ~mask;
}
|
||||
|
||||
template <typename T>
|
||||
force_inline T Align (T value, s64 alignment) {
|
||||
s64 intermediate = (((s64)value) + alignment - 1) & (~(alignment - 1));
|
||||
return (T)intermediate;
|
||||
}
|
||||
|
||||
// #TODO: template this so it works with any pointer type
|
||||
// force_inline u8* Align_To_Cache_Line(u8* address)
|
||||
/*
|
||||
force_inline s64 Align_Forwards(s64 size, s64 alignment) {
|
||||
return (((size + alignment - 1) / alignment) * alignment);
|
||||
}
|
||||
*/
|
||||
|
||||
// Branchless nextpow2 implementation. Returns zero if v is negative.
|
||||
// All it does is fill in all the bits to the right of the most significant bit.
|
||||
// Branchless next-power-of-two: fills every bit below the most significant
// set bit, then adds one. A value already a power of two is returned
// unchanged; negative inputs collapse to zero (the sign-propagating shifts
// turn v into -1, and the final increment yields 0).
force_inline s64 Next_Power_Of_Two(s64 v) {
    v -= 1;

    for (s64 shift = 1; shift <= 32; shift <<= 1) {
        v |= v >> shift;
    }

    v += 1;

    return v;
}
|
||||
|
||||
#define Stringify_(S) #S
|
||||
#define Stringify(S) Stringify_(S)
|
||||
|
||||
#if COMPILER_MSVC
|
||||
# define debug_break() __debugbreak()
|
||||
#elif COMPILER_CLANG || COMPILER_GCC
|
||||
# define debug_break() __builtin_trap()
|
||||
#else
|
||||
# define debug_break()
|
||||
# error Unknown trap intrinsic for this compiler.
|
||||
#endif
|
||||
|
||||
#define AssertAlways(x) do{if(!(x)) {debug_break();}}while(0)
|
||||
#if BUILD_DEBUG
|
||||
# define Assert(x) AssertAlways(x)
|
||||
#else
|
||||
# define Assert(x) (void)(x)
|
||||
#endif
|
||||
|
||||
#if LANG_CPP
|
||||
# define C_LINKAGE_BEGIN extern "C"{
|
||||
# define C_LINKAGE_END }
|
||||
# define C_LINKAGE extern "C"
|
||||
#else
|
||||
# define C_LINKAGE_BEGIN
|
||||
# define C_LINKAGE_END
|
||||
# define C_LINKAGE
|
||||
#endif
|
||||
|
||||
// Disable some of MSVC most aggressive Debug runtime checks in function header/footer (used in some simple/low-level functions)
|
||||
#if COMPILER_MSVC
|
||||
#define MSVC_RUNTIME_CHECKS_OFF __pragma(runtime_checks("",off)) __pragma(check_stack(off)) __pragma(strict_gs_check(push,off))
|
||||
#define MSVC_RUNTIME_CHECKS_RESTORE __pragma(runtime_checks("",restore)) __pragma(check_stack()) __pragma(strict_gs_check(pop))
|
||||
#else
|
||||
#define MSVC_RUNTIME_CHECKS_OFF
|
||||
#define MSVC_RUNTIME_CHECKS_RESTORE
|
||||
#endif
|
||||
|
||||
// ForExpansions. Not sure if this is a good idea...
|
||||
// #TODO: Maybe remove these. I prefer verbose and clear over this.
|
||||
#define For(_idx_, _until_) for (s64 _idx_ = 0; _idx_ < _until_; ++_idx_)
|
||||
#define ForBetween(_idx_, _start_, _until_) for (s64 _idx_ = _start_; _idx_ < _until_; ++_idx_)
|
||||
#define ForArray(_idx_, _array_) for (s64 _idx_ = 0; _idx_ < (_array_).count; ++_idx_)
|
||||
#define ForArrayStartingAt(_it_, _array_, _start_) for (s64 _it_ = _start_; _it_ < (_array_).count; _it_ += 1)
|
||||
#define ForUpTo(_it_, _end_) for (s64 _it_ = 0; _it_ < _end_; _it_ += 1)
|
||||
|
||||
// #MOVE TO CPU_X64.cpp
|
||||
PROTOTYPING_API int CPU_Base_Frequency();
|
||||
|
||||
38
lib/Base/Base_String.h
Normal file
38
lib/Base/Base_String.h
Normal file
@ -0,0 +1,38 @@
|
||||
#pragma once
|
||||
|
||||
#include "Base.h"
|
||||
|
||||
// Need to sort out how formatted strings and string builders are allocated
|
||||
// Maybe just use context.allocator?
|
||||
// What about temp strings? use context.temp?
|
||||
|
||||
// Length-counted byte string. `data` is NOT owned and is not guaranteed to be
// NUL-terminated (see to_c_string for conversion).
struct string {
    s64 count; // number of bytes at `data`
    u8* data;  // borrowed pointer; lifetime is managed by whoever created it
    // Construct from a string literal or C-string
    string() { // default constructor
        count = 0;
        data = nullptr;
    }

    // Wraps (does not copy) the C string's bytes; the caller keeps ownership.
    // NOTE(review): intentionally non-explicit so literals convert implicitly,
    // and the const is cast away — bytes must never be mutated through `data`
    // when constructed from a literal. Confirm this contract with callers.
    string(const char* cstr) {
        count = strlen(cstr);
        data = (u8*)cstr;
    }
};
|
||||
|
||||
bool strings_match(string first_string, string second_string);
|
||||
|
||||
// ~ API ~ #TODO
|
||||
// string string_view(string n_string, int start_index, int view_count);
|
||||
// string copy_string(string original_string);
|
||||
// string copy_string(char* c_string);
|
||||
// void free(string& n_string);
|
||||
|
||||
bool is_valid(string n_string);
|
||||
bool is_c_string(string n_string);
|
||||
|
||||
char* to_c_string(string n_string);
|
||||
|
||||
string format_string(char* format, ...);
|
||||
string string_from_literal(char* literal);
|
||||
23
lib/Base/Base_Thread_Context.cpp
Normal file
23
lib/Base/Base_Thread_Context.cpp
Normal file
@ -0,0 +1,23 @@
|
||||
#include "Base_Thread_Context.h"
|
||||
|
||||
// C_LINKAGE thread_static TCTX* tctx_thread_local;
|
||||
thread_static Thread_Context* thread_local_context;
|
||||
|
||||
// Start from w32_entry_point_caller ->
|
||||
// see main_thread_base_entry_point
|
||||
// //- rjf: set up thread context
|
||||
// TCTX *tctx = tctx_alloc();
|
||||
// tctx_select(tctx);
|
||||
// See: tctx_alloc(void)
|
||||
|
||||
// Let's do arenas first.
|
||||
|
||||
|
||||
// Intended to initialize `tctx` for the calling thread; currently an empty stub.
void init_thread_context(Thread_Context* tctx) {
    // Should be Arena-bootstrapped with Temp Arena maybe?
    // #TODO - call from entry point.
}
|
||||
|
||||
// Returns the calling thread's context. May be null if the thread has not
// been initialized via init_thread_context yet.
Thread_Context* get_thread_context() {
    return thread_local_context; // already a Thread_Context*; no cast needed
}
|
||||
44
lib/Base/Base_Thread_Context.h
Normal file
44
lib/Base/Base_Thread_Context.h
Normal file
@ -0,0 +1,44 @@
|
||||
#pragma once
|
||||
|
||||
#include "Allocator.h"
|
||||
#include "Base.h"
|
||||
// #include "Arena.h"
|
||||
|
||||
struct Arena; // #TEMP - Base_Thread_Context and Arena rely on each other, so I have to figure out how to make this less messy (maybe with meta-generated forward declares?)
|
||||
|
||||
// See Context_Base in jai, and TCTX in raddebugger:
|
||||
// Per-thread bundle of allocators and debug state, fetched via get_thread_context().
struct Thread_Context {
    Arena* temp; // Used for temporary allocations.
    Arena* arena; // general purpose local arena

    Allocator allocator;            // current allocator; swapped/restored by Push_Allocator
    u16 GPAllocator_alignment = 16; // default alignment for general-purpose allocations
    // Logger logger;
    // Stack_Trace* stack_trace;
    // #TODO: other debug information

    s64 thread_idx; // index of this thread — TODO confirm the numbering scheme

    string thread_name; // human-readable name for debugging/logging
};
|
||||
|
||||
Thread_Context* get_thread_context();
|
||||
|
||||
#define push_allocator(x) Push_Allocator guard(x) // maybe should append line number to guard?
|
||||
struct Push_Allocator {
|
||||
Thread_Context* context;
|
||||
Allocator old_allocator;
|
||||
|
||||
Push_Allocator (Allocator new_allocator) {
|
||||
context = get_thread_context();
|
||||
old_allocator = context->allocator;
|
||||
context->allocator = new_allocator;
|
||||
}
|
||||
|
||||
~Push_Allocator () {
|
||||
context->allocator = old_allocator;
|
||||
}
|
||||
};
|
||||
|
||||
|
||||
|
||||
484
lib/Base/Basic.cpp
Normal file
484
lib/Base/Basic.cpp
Normal file
@ -0,0 +1,484 @@
|
||||
#include "General_Purpose_Allocator.h"
|
||||
#include "Basic.h"
|
||||
|
||||
#include <cmath>
|
||||
#include <math.h> // isnan, floor
|
||||
#include <stdlib.h> // qsort
|
||||
#include <cassert> // assert
|
||||
|
||||
|
||||
// First difference: output[i] = input[i+1] - input[i] for i in [0, count-2].
// `output` must provide at least input.count - 1 slots; on return its count
// is set to input.count - 1.
Native_Error* Basic_Difference2 (ArrayView<f64> input, ArrayView<f64>& output) {
    Array_Check(input);
    Array_Check(output);

    // ensure enough room. Note output.count = input.count
    Assert(output.count >= input.count - 1);

    ForUpTo(i, input.count-1) {
        output[i] = input[i + 1] - input[i];
    }

    output.count = input.count - 1;

    return nullptr;
}
|
||||
|
||||
// Arithmetic mean of `input`, written to `*mean`.
Native_Error* Basic_Mean2 (ArrayView<f64> input, f64* mean) {
    Array_Check(input);
    Null_Pointer_Check(mean);

    f64 total = 0;
    ForUpTo(element_index, input.count) {
        total += input[element_index];
    }

    *mean = total / (f64)input.count;
    return nullptr;
}
|
||||
|
||||
// Sorts `input` ascending, in place. The comparator assumes the data
// contains no NaNs.
Native_Error* Basic_QuickSortInPlace (ArrayView<f64> input) {
    Array_Check(input);

    qsort(input.data, (size_t)input.count, sizeof(f64), qsort_doubles_comparator_nonnan);

    return nullptr;
}
|
||||
|
||||
// Median of `unsorted_input`, computed on a sorted COPY so the caller's
// ordering is untouched. Assumes the data contains no NaNs (non-NaN comparator).
Native_Error* Basic_Median2 (ArrayView<f64> unsorted_input, f64* median) {
    Array_Check(unsorted_input);
    Null_Pointer_Check(median);
    auto input_sorted = array_copy(unsorted_input);
    qsort(input_sorted.data, (u64)input_sorted.count, sizeof(f64), qsort_doubles_comparator_nonnan);

    s64 middle_element_index = unsorted_input.count / 2;

    if (unsorted_input.count % 2 == 1) {
        // Odd count: the middle element is the median.
        (*median) = input_sorted[middle_element_index];
    } else {
        // Even count: average of the two middle elements.
        (*median) = (input_sorted[middle_element_index - 1] + input_sorted[middle_element_index]) / 2.0;
    }

    array_free(input_sorted);

    return nullptr;
}
|
||||
|
||||
// Linearly rescales `input` in place so its smallest element maps to `min`
// and its largest to `max`. A constant array is left unchanged (no error).
Native_Error* Basic_RescaleInPlace (ArrayView<f64> input, double min, double max) {
    Array_Check(input);
    // Target range must be non-degenerate and correctly ordered.
    if (max < min || max == min) { return New_Error("Min or max inputs are not valid!"); }

    f64 smallest_element; f64 largest_element;
    auto error = Basic_Min2(input, &smallest_element);
    if (error != nullptr) return error;

    error = Basic_Max2(input, &largest_element);
    if (error != nullptr) return error;

    // All elements equal: rescaling would divide by zero, so do nothing.
    if (largest_element == smallest_element)
        return nullptr;

    ForArray(i, input) {
        input[i] = (input[i] - smallest_element) / (largest_element - smallest_element) * (max - min) + min;
    }

    return nullptr;
}
|
||||
|
||||
// Smallest element of `input`, written to `*min_out`.
Native_Error* Basic_Min2 (ArrayView<f64> input, f64* min_out) {
    Array_Check(input);
    Null_Pointer_Check(min_out);

    f64 smallest = input[0];
    ForArrayStartingAt(i, input, 1) {
        smallest = (input[i] < smallest) ? input[i] : smallest;
    }

    (*min_out) = smallest;
    return nullptr;
}
|
||||
|
||||
// Largest element of `input`, written to `*max_out`.
Native_Error* Basic_Max2 (ArrayView<f64> input, f64* max_out) {
    Array_Check(input);
    Null_Pointer_Check(max_out);

    f64 largest = input[0];
    ForArrayStartingAt(i, input, 1) {
        largest = (input[i] > largest) ? input[i] : largest;
    }

    (*max_out) = largest;
    return nullptr;
}
|
||||
|
||||
// Returns the larger of the two inputs.
double Basic_Max (double input1, double input2) {
    return (input1 > input2) ? input1 : input2;
}
|
||||
|
||||
// True iff `input` is strictly positive, finite, and not NaN.
bool Basic_Is_Positive_Real (f32 input) {
    return input > 0.0 && !isnan(input) && !isinf(input);
}
|
||||
|
||||
// True iff `input` is strictly positive, finite, and not NaN.
bool Basic_Is_Positive_Real (f64 input) {
    return input > 0.0 && !isnan(input) && !isinf(input);
}
|
||||
|
||||
// Population standard deviation (divides by N, not N-1), written to `*stddev`.
Native_Error* Basic_Standard_Deviation2 (ArrayView<f64> input, f64* stddev) {
    Array_Check(input);
    Null_Pointer_Check(stddev);

    f64 mean = 0.0;
    // Previously the error from Basic_Mean2 was silently discarded; propagate it.
    auto error = Basic_Mean2(input, &mean);
    if (error != nullptr) return error;

    f64 sum_of_squared_differences = 0;
    ForArray(i, input) {
        f64 difference = input[i] - mean;
        sum_of_squared_differences += difference * difference;
    }

    (*stddev) = sqrt(sum_of_squared_differences / (f64)input.count);

    return nullptr;
}
|
||||
|
||||
// Sample (Bessel-corrected) variance: divides by N-1. Written to `*variance`.
Native_Error* Basic_Variance2 (ArrayView<f64> input, f64* variance) {
    Array_Check(input);
    Null_Pointer_Check(variance);

    f64 mean = 0.0;
    // Previously the error from Basic_Mean2 was silently discarded; propagate it.
    auto error = Basic_Mean2(input, &mean);
    if (error != nullptr) return error;

    f64 sum_of_squared_differences = 0;
    ForArray(i, input) {
        f64 difference = input[i] - mean;
        sum_of_squared_differences += difference * difference;
    }

    // The original gated this on a local `f64 sample = 1;` that was always
    // truthy, so the population branch was dead code — sample variance it is.
    // NOTE(review): input.count == 1 divides by zero (NaN), matching the
    // original behavior; confirm callers guard against single-element input.
    (*variance) = sum_of_squared_differences / (f64)(input.count - 1);
    return nullptr;
}
|
||||
|
||||
// Root mean square: sqrt(mean(x^2)), written to `*rms`.
Native_Error* Basic_Root_Mean_Squared2 (ArrayView<f64> input, f64* rms) {
    Array_Check(input);
    Null_Pointer_Check(rms);

    f64 square = 0;
    ForArray(i, input) {
        // x*x instead of pow(x, 2): identical result, avoids a needless
        // general-purpose transcendental call per element.
        square += input[i] * input[i];
    }
    f64 mean = (square / ((f64)input.count));

    (*rms) = sqrt(mean);
    return nullptr;
}
|
||||
|
||||
// Fills `output` with the indices of `input` ordered by value, largest first
// (insertion sort; O(n^2), fine for small arrays).
Native_Error* Basic_IndexSort2 (ArrayView<f64> input, ArrayView<s64> output) {
    Array_Check(input);
    Array_Check(output);
    // Previously unchecked: we write input.count indices, so output must have room.
    if (output.count < input.count) { return New_Error("`output.count` must be at least `input.count`!"); }

    ForArray(i, input) { output[i] = i; }
    ForArray(i, input) {
        // Bubble output[i] left while the value it indexes is larger.
        for (s64 j = i; j > 0; j -= 1) {
            if (input[output[j]] > input[output[j-1]]) {
                s64 temp = output[j];
                output[j] = output[j - 1];
                output[j - 1] = temp;
            }
        }
    }

    return nullptr;
}
|
||||
|
||||
// Counts the elements of `input` that are not NaN.
Native_Error* Basic_Count_Non_Nan2 (ArrayView<f64> input, s64* non_nan_count) {
    Array_Check(input);
    Null_Pointer_Check(non_nan_count);

    s64 total = 0;
    ForUpTo(i, input.count) {
        total += isnan(input[i]) ? 0 : 1;
    }

    (*non_nan_count) = total;
    return nullptr;
}
|
||||
|
||||
// Computes the `percentile` (in [0,1]) of `input` with MATLAB-style midpoint
// interpolation. WARNING: sorts the caller's array IN PLACE (NaNs sort to the
// end via qsort_doubles_comparator). Returns a warning (and NAN) if every
// value is NaN.
Native_Error* Basic_Calculate_Percentile_New (ArrayView<f64> input, f64 percentile, f64* percentile_value_out) {
    Array_Check(input);
    Null_Pointer_Check(percentile_value_out);

    Assert(percentile >= 0.0 && percentile <= 1.0);

    qsort(input.data, input.count, sizeof(f64), qsort_doubles_comparator);
    s64 non_nan_count = 0;
    // NOTE: the call inside Assert still runs in release builds, because
    // Assert(x) expands to (void)(x) there.
    Assert(Basic_Count_Non_Nan2(input, &non_nan_count) == nullptr);

    if (non_nan_count == 0) {
        (*percentile_value_out) = NAN;
        return New_Warning("All values in the input array are `NAN`!");
    }

    // r is the (1-based, fractional) rank; k is the nearest row at or below it.
    auto r = percentile * non_nan_count;
    auto k = floor(r + 0.5);

    auto kp1 = k + 1;

    // Ratio between the K and K+1 rows:
    r = r - k;

    // Find indices that are out of the range 1 to n and cap them:
    if (k < 1 || isnan(k)) {
        k = 1;
    }

    // kp1 = min( kp1, n );
    if (non_nan_count < kp1) { kp1 = (f64)non_nan_count; }

    // Use simple linear interpolation for the valid percentages:
    // y = (0.5+r).*x(kp1,:)+(0.5-r).*x(k,:); // yuck.
    s64 kp1_i = static_cast<s64>(kp1);
    s64 k_i = static_cast<s64>(k);

    // -1 converts the 1-based ranks above to 0-based array indices.
    f64 y_first_part = (0.5 + r) * input[kp1_i - 1];
    f64 y_second_part = (0.5 - r) * input[k_i - 1];
    auto y = y_first_part + y_second_part;

    // Make sure that values we hit exactly are copied rather than interpolated:
    if (r == -0.5) {
        (*percentile_value_out) = input[k_i - 1];
        return nullptr;
    }

    // Make sure that identical values are copied rather than interpolated:
    if (input[k_i-1] == input[kp1_i-1]) {
        (*percentile_value_out) = input[k_i - 1];
        return nullptr;
    }

    (*percentile_value_out) = y;

    return nullptr;
}
|
||||
|
||||
// Reverses `input` in place by swapping from both ends toward the middle.
Native_Error* Basic_ReverseArrayInPlace (ArrayView<f64> input) {
    Array_Check(input);

    s64 left = 0;
    s64 right = input.count - 1;
    while (left < right) {
        f64 swapped = input[left];
        input[left] = input[right];
        input[right] = swapped;
        left += 1;
        right -= 1;
    }

    return nullptr;
}
|
||||
|
||||
// Native_Error* Basic_Reverse_Array (int* input, int input_length) {
|
||||
// for (int i = 0; i < input_length / 2; i++) {
|
||||
// // Swap the ith and (input_length - i - 1)th elements
|
||||
// int temp = input[i];
|
||||
// input[i] = input[input_length - i - 1];
|
||||
// input[input_length - i - 1] = temp;
|
||||
// }
|
||||
|
||||
// return nullptr;
|
||||
// }
|
||||
|
||||
// Native_Error* Basic_Reverse_Array (double* input, int input_length) {
|
||||
// for (int i = 0; i < input_length / 2; i++) {
|
||||
// // Swap the ith and (input_length - i - 1)th elements
|
||||
// double temp = input[i];
|
||||
// input[i] = input[input_length - i - 1];
|
||||
// input[input_length - i - 1] = temp;
|
||||
// }
|
||||
|
||||
// return nullptr;
|
||||
// }
|
||||
|
||||
// #TODO: This should be for NDArray or 2DArray. idk.
|
||||
// Switches a 2D array between row-major and column-major memory order
// (a transpose of the memory layout). `output` must not alias `input`.
Native_Error* Basic_2DArrayInvertMemoryOrder (ArrayView<f64> input, s64 first_dimension, s64 second_dimension, ArrayView<f64> output) {
    Array_Check(input);
    Array_Check(output);
    if (output.count < input.count) { return New_Error("`input.count` should not exceed `output.count`!"); }
    Assert(first_dimension * second_dimension == input.count);
    Assert(input.count == output.count);

    ForUpTo(i, first_dimension) {
        ForUpTo(j, second_dimension) {
            // Element (i, j) moves from column-strided to row-strided position.
            output[j + second_dimension * i] = input[i + first_dimension * j];
        }
    }

    return nullptr;
}
|
||||
|
||||
// Less-than comparator for doubles in which NaNs compare greater than every
// real value, so a sort pushes them to the end.
bool sort_doubles_comparator(double a, double b) {
    bool a_is_nan = isnan(a);
    bool b_is_nan = isnan(b);
    if (a_is_nan) return false; // NaN is never "smaller"
    if (b_is_nan) return true;  // any real value sorts before NaN
    return a < b;
}
|
||||
|
||||
// qsort comparator for ascending doubles. Behavior with NaN inputs is
// unspecified — use qsort_doubles_comparator for NaN-tolerant sorting.
int qsort_doubles_comparator_nonnan(const void* a, const void* b) {
    double lhs = *(const double*)a;
    double rhs = *(const double*)b;

    if (lhs < rhs) return -1;
    return (lhs > rhs) ? 1 : 0;
}
|
||||
|
||||
// qsort comparator for ascending doubles that sorts NaNs after all real values.
int qsort_doubles_comparator(const void* a, const void* b) {
    double lhs = *(const double*)a;
    double rhs = *(const double*)b;

    if (isnan(lhs)) return 1;  // NaN values are considered greater
    if (isnan(rhs)) return -1; // Non-NaN values are considered smaller

    if (lhs < rhs) return -1;
    return (lhs > rhs) ? 1 : 0;
}
|
||||
|
||||
// Same percentile computation as Basic_Calculate_Percentile_New, but the
// caller must pass `input` already sorted ascending — useful when several
// percentiles are taken from the same data. Assumes no NaNs: the code sets
// non_nan_count = input.count without checking.
Native_Error* Basic_CalculatePercentileNoSort (ArrayView<f64> input, f64 percentile, f64* percentile_value_out) {
    Array_Check(input);
    Null_Pointer_Check(percentile_value_out);

    Assert(percentile >= 0.0 && percentile <= 1.0);

    s64 non_nan_count = input.count;

    // r is the (1-based, fractional) rank; k is the nearest row at or below it.
    auto r = percentile * non_nan_count;
    auto k = floor(r + 0.5);

    auto kp1 = k + 1;

    // Ratio between the K and K+1 rows:
    r = r - k;

    // Find indices that are out of the range 1 to n and cap them:
    if (k < 1 || isnan(k)) {
        k = 1;
    }

    // kp1 = min( kp1, n );
    if (non_nan_count < kp1) { kp1 = (f64)non_nan_count; }

    // Use simple linear interpolation for the valid percentages:
    // y = (0.5+r).*x(kp1,:)+(0.5-r).*x(k,:); // yuck.
    s64 kp1_i = static_cast<s64>(kp1);
    s64 k_i = static_cast<s64>(k);

    // -1 converts the 1-based ranks above to 0-based array indices.
    f64 y_first_part = (0.5 + r) * input[kp1_i - 1];
    f64 y_second_part = (0.5 - r) * input[k_i - 1];
    auto y = y_first_part + y_second_part;

    // Make sure that values we hit exactly are copied rather than interpolated:
    if (r == -0.5) {
        (*percentile_value_out) = input[k_i - 1];
        return nullptr;
    }

    // Make sure that identical values are copied rather than interpolated:
    if (input[k_i-1] == input[kp1_i-1]) {
        (*percentile_value_out) = input[k_i - 1];
        return nullptr;
    }

    (*percentile_value_out) = y;

    return nullptr;
}
|
||||
|
||||
// In-place IQR outlier replacement: values outside
// [Q1 - t*IQR, Q3 + t*IQR] are replaced with their left neighbor.
// Quartiles are computed on a sorted copy, so the caller's ordering survives.
Native_Error* Basic_Replace_Outliers2 (ArrayView<f64> input, f64 outlier_threshold) {
    Array_Check(input);
    Assert(outlier_threshold > 0);

    auto input_copy = array_copy(input);

    // NOTE: the non-NaN comparator is used here — NaN handling is undefined.
    qsort(input_copy.data, input_copy.count, sizeof(f64), qsort_doubles_comparator_nonnan);
    f64 Q1 = 0.0;
    f64 Q3 = 0.0;

    // These calls still execute in release builds (Assert(x) -> (void)(x)).
    Assert(Basic_CalculatePercentileNoSort(input_copy, 0.25, &Q1) == nullptr);
    Assert(Basic_CalculatePercentileNoSort(input_copy, 0.75, &Q3) == nullptr);

    f64 IQR = Q3 - Q1;
    f64 iqr_outlier_threshold = IQR * outlier_threshold;

    // Identify points below Q1 - outlier_threshold, and above Q3 + outlier_threshold
    auto low_threshold = Q1 - iqr_outlier_threshold;
    auto high_threshold = Q3 + iqr_outlier_threshold;

    // Starts at index 1 because the replacement is the left neighbor —
    // consequence: input[0] is never replaced even if it is an outlier.
    // NOTE(review): confirm that leaving a leading outlier in place is intended.
    ForArrayStartingAt(i, input, 1) {
        if (input[i] < low_threshold || input[i] > high_threshold) {
            input[i] = input[i-1];
        }
    }

    array_free(input_copy);

    return nullptr;
}
|
||||
|
||||
// In place, replaces every element outside [low_threshold, high_threshold]
// with `replacement_value`.
Native_Error* Basic_Replace_Values_Beyond_Threshold2 (ArrayView<f64> input, f64 low_threshold, f64 high_threshold, f64 replacement_value) {
    Array_Check(input);

    ForUpTo(i, input.count) {
        bool out_of_range = (input[i] < low_threshold) || (input[i] > high_threshold);
        if (out_of_range) {
            input[i] = replacement_value;
        }
    }

    return nullptr;
}
|
||||
|
||||
|
||||
/* // #TODO: Replace with version that doesn't use Eigen
|
||||
Native_Error* Basic_Roots_To_Polynomials2 (ArrayView<f64> roots, ArrayView<f64> polynomials) {
|
||||
Array_Check(roots);
|
||||
|
||||
s64 root_count = roots.count;
|
||||
|
||||
if (root_count == 0) { return New_Error("`roots.count` is zero!"); }
|
||||
if (polynomials.count < root_count + 1) {
|
||||
return New_Error("`polynomials.count` should be roots.count + 1!");
|
||||
}
|
||||
|
||||
// For real roots
|
||||
Eigen::VectorXd roots_vec = Eigen::Map<Eigen::VectorXd>(roots.data, root_count);
|
||||
// c = [1 zeros(1,n,class(x))];
|
||||
Eigen::VectorXd c = Eigen::VectorXd::Zero(root_count + 1);
|
||||
c[0] = 1.0;
|
||||
// for j = 1:n
|
||||
// c[1] = c[1] - roots_vec[0] * c[0]; // Extract first index
|
||||
ForArray(i, roots) {
|
||||
// c(2:(j+1)) = c(2:(j+1)) - e(j).*c(1:j);
|
||||
Eigen::VectorXd val_temp = c.segment(1, i + 1) - roots_vec[i] * c.segment(0, i + 1);
|
||||
c.segment(1, i + 1) = val_temp;
|
||||
}
|
||||
// The result should be real if the roots are complex conjugates.
|
||||
memcpy(polynomials.data, c.data(), (root_count + 1) * sizeof(f64));
|
||||
|
||||
return nullptr;
|
||||
}
|
||||
*/
|
||||
|
||||
// Complex exponential: e^(a+bi) = e^a * (cos b + i*sin b).
Complex exponential (Complex cx) {
    f64 magnitude = std::exp(cx.real);
    f64 re = magnitude * std::cos(cx.imag);
    f64 im = magnitude * std::sin(cx.imag);
    return Complex(re, im);
}
|
||||
|
||||
// Complex conjugate: negates the imaginary part.
Complex conjugate (Complex cx) {
    Complex result = cx;
    result.imag = -result.imag;
    return result;
}
|
||||
|
||||
// Magnitude (modulus) of a complex number. NOTE: overloads the name `fabs`.
f64 fabs(Complex cx) {
    f64 squared_magnitude = cx.real * cx.real + cx.imag * cx.imag;
    return sqrt(squared_magnitude);
}
|
||||
148
lib/Base/Basic.h
Normal file
148
lib/Base/Basic.h
Normal file
@ -0,0 +1,148 @@
|
||||
#pragma once
|
||||
|
||||
#include "error-codes.h"
|
||||
#include "Array.h"
|
||||
|
||||
#ifdef __cplusplus
|
||||
extern "C" {
|
||||
#endif
|
||||
#ifdef __cplusplus
|
||||
}
|
||||
#endif
|
||||
|
||||
int qsort_doubles_comparator_nonnan(const void* a, const void* b);
|
||||
int qsort_doubles_comparator(const void* a, const void* b);
|
||||
|
||||
// @brief Calculates difference and approximate derivative for 1-dimensional data
|
||||
// Caller needs to supply memory for output and understand that output_length = input_length - 1
|
||||
PROTOTYPING_API Native_Error* Basic_Difference2 (ArrayView<f64> input, ArrayView<f64>& output);
|
||||
|
||||
PROTOTYPING_API Native_Error* Basic_Mean2 (ArrayView<f64> input, f64* mean);
|
||||
|
||||
PROTOTYPING_API Native_Error* Basic_QuickSortInPlace (ArrayView<f64> input);
|
||||
|
||||
PROTOTYPING_API Native_Error* Basic_Median2 (ArrayView<f64> unsorted_input, f64* median);
|
||||
|
||||
PROTOTYPING_API Native_Error* Basic_RescaleInPlace (ArrayView<f64> input, double min, double max);
|
||||
|
||||
PROTOTYPING_API Native_Error* Basic_Min2 (ArrayView<f64> input, f64* min_out);
|
||||
|
||||
PROTOTYPING_API Native_Error* Basic_Max2 (ArrayView<f64> input, f64* max_out);
|
||||
|
||||
double Basic_Max (double input1, double input2);
|
||||
|
||||
bool Basic_Is_Positive_Real (f32 input);
|
||||
bool Basic_Is_Positive_Real (f64 input);
|
||||
|
||||
PROTOTYPING_API Native_Error* Basic_Standard_Deviation2 (ArrayView<f64> input, f64* stddev);
|
||||
|
||||
PROTOTYPING_API Native_Error* Basic_Variance2 (ArrayView<f64> input, f64* variance);
|
||||
|
||||
PROTOTYPING_API Native_Error* Basic_Root_Mean_Squared2 (ArrayView<f64> input, f64* rms);
|
||||
|
||||
// Sorts an array from largest to smallest, returning the indices of the sorted array
|
||||
PROTOTYPING_API Native_Error* Basic_IndexSort2 (ArrayView<f64> input, ArrayView<s64> output);
|
||||
|
||||
PROTOTYPING_API Native_Error* Basic_Count_Non_Nan2 (ArrayView<f64> input, s64* non_nan_count);
|
||||
|
||||
PROTOTYPING_API Native_Error* Basic_Calculate_Percentile_New (ArrayView<f64> input, f64 percentile, f64* percentile_value_out);
|
||||
|
||||
// Does not include sort, because sorting is slow, and we may need to call this multiple
|
||||
// times with the same sorted input.
|
||||
PROTOTYPING_API Native_Error* Basic_CalculatePercentileNoSort (ArrayView<f64> input, f64 percentile, f64* percentile_value_out);
|
||||
|
||||
PROTOTYPING_API Native_Error* Basic_ReverseArrayInPlace (ArrayView<f64> input);
|
||||
// Native_Error* Basic_Reverse_Array (double* input, int input_length);
|
||||
// Native_Error* Basic_Reverse_Array (int* input, int input_length);
|
||||
|
||||
// Switches from row-order to column-order or vice-versa. #NOTE: you must know what the order
|
||||
// and dimensions of the data are to begin with!!
|
||||
PROTOTYPING_API Native_Error* Basic_2DArrayInvertMemoryOrder (ArrayView<f64> input, s64 first_dimension, s64 second_dimension, ArrayView<f64> output);
|
||||
|
||||
// In-place replacement of outliers (using interquartile method, with threshold of 1.5) with nearest values.
|
||||
PROTOTYPING_API Native_Error* Basic_Replace_Outliers2 (ArrayView<f64> input, f64 outlier_threshold=1.5);
|
||||
|
||||
PROTOTYPING_API Native_Error* Basic_Replace_Values_Beyond_Threshold2 (ArrayView<f64> input, f64 low_threshold, f64 high_threshold, f64 replacement_value);
|
||||
|
||||
PROTOTYPING_API Native_Error* Basic_Roots_To_Polynomials2 (ArrayView<f64> roots, ArrayView<f64> polynomials);
|
||||
|
||||
// #TODO: Basic_Find (returns indices of non-zero elements).
|
||||
// Need to make this generic, maybe using templates?
|
||||
// PROTOTYPING_API ArrayView<s32> Basic_Find(ArrayView<f64> x, void* condition);
|
||||
|
||||
// Add parameters for peak prominence, height, etc.
|
||||
// PROTOTYPING_API Native_Error* Basic_Find_Peaks (double* input, int input_length, int* peak_indices, int* peak_count);
|
||||
|
||||
// Minimal double-precision complex number with value semantics.
struct Complex {
    f64 real; f64 imag;

    Complex() { real = 0; imag = 0; }
    Complex(f64 _real) { real = _real; imag = 0; } // real-axis value
    Complex(f64 _real, f64 _imag) { real = _real; imag = _imag; }

    Complex operator+(const Complex& other) const {
        return Complex(real + other.real, imag + other.imag);
    }

    Complex operator-(const Complex& other) const {
        return Complex(real - other.real, imag - other.imag);
    }

    // (a+bi)(c+di) = (ac - bd) + (ad + bc)i
    Complex operator*(const Complex& other) const {
        return Complex(
            real * other.real - imag * other.imag,
            real * other.imag + imag * other.real
        );
    }

    // Division via the conjugate; divides by |other|^2 with no zero guard,
    // i.e. same semantics as raw floating-point division by zero.
    Complex operator/(const Complex& other) const {
        f64 denom = other.real * other.real + other.imag * other.imag;
        return Complex(
            (real * other.real + imag * other.imag) / denom,
            (imag * other.real - real * other.imag) / denom
        );
    }

    Complex& operator+=(const Complex& other) {
        real += other.real;
        imag += other.imag;
        return *this;
    }

    Complex& operator-=(const Complex& other) {
        real -= other.real;
        imag -= other.imag;
        return *this;
    }

    Complex& operator*=(const Complex& other) {
        // Compute into temporaries first so `real` isn't clobbered mid-formula.
        f64 r = real * other.real - imag * other.imag;
        f64 i = real * other.imag + imag * other.real;
        real = r;
        imag = i;
        return *this;
    }

    Complex& operator/=(const Complex& other) {
        f64 denom = other.real * other.real + other.imag * other.imag;
        f64 r = (real * other.real + imag * other.imag) / denom;
        f64 i = (imag * other.real - real * other.imag) / denom;
        real = r;
        imag = i;
        return *this;
    }

    // Exact bitwise-style equality of both components (usual float-== caveats).
    bool operator==(const Complex& other) const {
        return real == other.real && imag == other.imag;
    }

    bool operator!=(const Complex& other) const {
        return !(*this == other);
    }
};
|
||||
|
||||
struct Complex32 { f32 real; f32 imag; };
|
||||
|
||||
Complex exponential (Complex cx);
|
||||
Complex conjugate (Complex cx);
|
||||
f64 fabs(Complex cx);
|
||||
110
lib/Base/ErrorCodes.cpp
Normal file
110
lib/Base/ErrorCodes.cpp
Normal file
@ -0,0 +1,110 @@
|
||||
#include "error-codes.h"
|
||||
|
||||
#include "General_Purpose_Allocator.h"
|
||||
#include <stdio.h> // vsnprintf, printf
|
||||
#include <cstdarg> // va_list...
|
||||
|
||||
// Should always be false when making python bindings.
|
||||
#define BREAK_ON_WARNINGS 0
|
||||
#define BREAK_ON_ERRORS 0
|
||||
#define BREAK_ON_FATAL_ERROR BUILD_DEBUG
|
||||
#define ALWAYS_PRINT_ERROR_MESSAGES BUILD_DEBUG
|
||||
|
||||
// Allocates a Native_Error and formats its message from printf-style args.
// The message buffer is fixed at 512 bytes; longer messages are truncated.
Native_Error* Create_New_Native_Error_Internal(char* format, va_list args) {
    constexpr s64 ERROR_BUFFER_COUNT = 512;

    auto error = ALLOCATE(Native_Error);
    error->data = ALLOCATE_RAW_ARRAY(ERROR_BUFFER_COUNT, u8);

    // You MUST copy the va_list before using it more than once
    va_list args_copy;
    va_copy(args_copy, args);
    s64 written = (s64)vsnprintf((char*)error->data, (size_t)ERROR_BUFFER_COUNT, format, args_copy);
    va_end(args_copy);

    // vsnprintf returns the length the FULL message would have had (or < 0 on
    // an encoding error). Previously that raw value was stored, so a truncated
    // message reported a count larger than the bytes actually in the buffer.
    if (written < 0) { written = 0; }
    if (written > ERROR_BUFFER_COUNT - 1) { written = ERROR_BUFFER_COUNT - 1; }
    error->count = written;

    return error;
}
|
||||
|
||||
// Builds a SEVERITY_FATAL error from printf-style arguments. In debug builds
// the message is printed, and (when enabled) the debugger is broken into.
Native_Error* New_Fatal_Error_Internal(char* format, ...) {
    va_list args;
    va_start(args, format);
    auto error = Create_New_Native_Error_Internal(format, args);
    va_end(args);

    error->severity = SEVERITY_FATAL;
#if BUILD_DEBUG && ALWAYS_PRINT_ERROR_MESSAGES
    // %.*s prints exactly `count` bytes — the buffer is length-counted.
    printf("[FATAL ERROR] %.*s\n", (s32)error->count, (char*)error->data);
#endif
#if BREAK_ON_FATAL_ERROR
    debug_break();
#endif

    return error;
}
|
||||
|
||||
// Merges two errors into one "callstack" message (new on top, old indented
// below). BOTH inputs are freed; the caller owns the returned error.
Native_Error* Native_Error_Callstack(Native_Error* new_error, Native_Error* old_error, ErrorSeverity severity) {
    // NOTE(review): "%s" requires both messages to be NUL-terminated, and the
    // buffer behind the temporary `string` must remain valid after this line —
    // confirm format_string allocates with the general-purpose allocator.
    auto error_message = format_string("%s\n > %s", new_error->data, old_error->data).data;

    Cleanup_Error(new_error);
    Cleanup_Error(old_error);

    Native_Error* error_merged = ALLOCATE(Native_Error);
    error_merged->data = (u8*)error_message;
    error_merged->count = strlen((char*)error_merged->data);
    error_merged->severity = severity;

    return error_merged;
}
|
||||
|
||||
// Exercises the error-chaining path: wraps an "original" error inside a new
// formatted one and returns the merged result. The caller must clean it up.
Native_Error* Native_Error_Test() {
    // This is quite verbose, but w/e
    auto old_error = New_Error("Original error...");
    auto new_message = format_string("Failed to start stream. Error Code: %d", -1).data;
    auto new_error = New_Error(new_message);
    // New_Error copied the message, so the formatted buffer can be freed now.
    GPAllocator_Delete(new_message);

    return Native_Error_Callstack(new_error, old_error, SEVERITY_NON_FATAL);
}
|
||||
|
||||
// Builds a SEVERITY_NON_FATAL error from printf-style arguments. In debug
// builds the message is printed; BREAK_ON_ERRORS optionally traps.
Native_Error* New_Error_Internal(char* format, ...) {
    va_list args;
    va_start(args, format);
    auto error = Create_New_Native_Error_Internal(format, args);
    va_end(args);

    error->severity = SEVERITY_NON_FATAL;
#if BUILD_DEBUG && ALWAYS_PRINT_ERROR_MESSAGES
    // %.*s prints exactly `count` bytes — the buffer is length-counted.
    printf("[ERROR (NON-FATAL)] %.*s\n", (s32)error->count, (char*)error->data);
#endif
#if BREAK_ON_ERRORS
    debug_break();
#endif

    return error;
}
|
||||
|
||||
// Builds a SEVERITY_WARNING error from printf-style arguments. In debug
// builds the message is printed; BREAK_ON_WARNINGS optionally traps.
Native_Error* New_Warning_Internal(char* format, ...) {
    va_list args;
    va_start(args, format);
    auto error = Create_New_Native_Error_Internal(format, args);
    va_end(args);

    error->severity = SEVERITY_WARNING;
#if BUILD_DEBUG && ALWAYS_PRINT_ERROR_MESSAGES
    // %.*s prints exactly `count` bytes — the buffer is length-counted.
    printf("[WARNING] %.*s\n", (s32)error->count, (char*)error->data);
#endif
#if BREAK_ON_WARNINGS
    debug_break();
#endif

    return error;
}
|
||||
|
||||
// Frees an error's message buffer and the error itself. Safe to call with
// nullptr. Always returns nullptr so callers can write
// `error = Cleanup_Error(error);`.
Native_Error* Cleanup_Error(Native_Error* error) {
    if (error != nullptr) {
        GPAllocator_Delete(error->data);
        GPAllocator_Delete(error);
    }
    return nullptr;
}
|
||||
161
lib/Base/General_Purpose_Allocator.cpp
Normal file
161
lib/Base/General_Purpose_Allocator.cpp
Normal file
@ -0,0 +1,161 @@
|
||||
// #TODO: Make this not MSVC-centric with alias for std::aligned_alloc / _aligned_malloc
|
||||
// #TODO: Define GPAllocator_Proc
|
||||
#include "General_Purpose_Allocator.h"
|
||||
|
||||
#include <string.h>
|
||||
|
||||
#if GP_ALLOCATOR_TRACK_ALLOCATIONS
|
||||
#include <mutex>
|
||||
General_Allocator gAllocator; // @Shared
|
||||
std::mutex allocator_mutex;
|
||||
#endif
|
||||
|
||||
#if !COMPILER_MSVC
|
||||
// Note: There is *no* aligned_realloc. Must implement manually if needed.
|
||||
// Aligned realloc for non-MSVC toolchains, built from alloc + copy + free
// (there is no standard aligned_realloc).
// NOTE(review): std::aligned_alloc requires the size to be a multiple of
// `alignment` — confirm callers guarantee this, or allocation may fail.
force_inline void* gp_aligned_realloc(u64 old_size, void* ptr, u64 new_size, u64 alignment) {
    // No prior block (or unknown old size): behaves like a fresh allocation.
    if (!ptr || old_size == 0) return std::aligned_alloc(alignment, new_size);
    // Realloc-to-zero acts like free.
    if (new_size == 0) { std::free(ptr); return nullptr; }

    // Allocate new block
    void* new_ptr = std::aligned_alloc(alignment, new_size);
    if (!new_ptr) return nullptr; // old block stays valid on failure

    u64 copy_size = old_size < new_size ? old_size : new_size;
    memcpy(new_ptr, ptr, copy_size);

    std::free(ptr);
    return new_ptr;
}
|
||||
#endif
|
||||
|
||||
|
||||
// Exposes the global allocation-tracker state; null when tracking is
// compiled out.
General_Allocator* get_general_allocator_data() {
#if GP_ALLOCATOR_TRACK_ALLOCATIONS
    return &gAllocator;
#else
    return nullptr;
#endif
}
|
||||
|
||||
constexpr s64 Allocation_Tracking_Is_Enabled = GP_ALLOCATOR_TRACK_ALLOCATIONS;
|
||||
|
||||
bool GPAllocator_Tracking_Enabled() {
|
||||
return Allocation_Tracking_Is_Enabled != 0;
|
||||
}
|
||||
|
||||
// One-time setup of the global allocation tracker: reserves a fixed-capacity
// array of Allocation records. The backing memory is intentionally never freed.
void GPAllocator_Initialize_Allocation_Tracker() {
#if GP_ALLOCATOR_TRACK_ALLOCATIONS
    constexpr s64 alignment = 64;
    s64 item_count_max = 64 * 4096; // fixed capacity for tracked allocations
    s64 total_allocation_size = item_count_max * sizeof(Allocation);
    auto memory = Aligned_Alloc(total_allocation_size, alignment); // @MemoryLeak (intentional)
    gAllocator.allocations = Array<Allocation>(item_count_max, memory, item_count_max, alignment);
    gAllocator.allocations.count = 0; // Init to zero.
#endif
}
|
||||
|
||||
// True if `old_memory` is currently tracked by this allocator. Always false
// when tracking is compiled out. Thread-safe via `allocator_mutex`.
bool GPAllocator_Is_This_Yours(void* old_memory) {
#if GP_ALLOCATOR_TRACK_ALLOCATIONS
    std::lock_guard<std::mutex> lock(allocator_mutex);

    // Linear scan of the tracked allocations for this address.
    // (Removed an unused `old_size` local left over from Remove_Allocation.)
    for (s64 i = 0; i < gAllocator.allocations.count; i += 1) {
        if (gAllocator.allocations[i].memory == old_memory) {
            return true;
        }
    }
#endif
    return false;
}
|
||||
|
||||
// Records a new allocation in the global tracker. No-op when tracking is
// compiled out or the allocation failed. Thread-safe via `allocator_mutex`.
void Add_Allocation(s64 new_size, void* new_memory_address, s32 alignment) {
#if GP_ALLOCATOR_TRACK_ALLOCATIONS
    if (new_memory_address == nullptr) return;

    std::lock_guard<std::mutex> lock(allocator_mutex);
    Allocation allocation = {new_size, new_memory_address, alignment};
    array_add(gAllocator.allocations, allocation);

    gAllocator.total_bytes_allocated += new_size;
#endif
}
|
||||
|
||||
// Removes `old_memory`'s record from the tracker and decrements the byte
// total. Asserts (debug) if the address was never tracked. Thread-safe via
// `allocator_mutex`; no-op when tracking is compiled out.
void Remove_Allocation(void* old_memory) {
#if GP_ALLOCATOR_TRACK_ALLOCATIONS
    std::lock_guard<std::mutex> lock(allocator_mutex);

    s64 old_size = 0;
    for (s64 i = 0; i < gAllocator.allocations.count; i += 1) {
        if (gAllocator.allocations[i].memory != old_memory)
            continue;
        old_size = gAllocator.allocations[i].size;
        // Order is irrelevant, so swap-with-last removal keeps this O(1).
        array_unordered_remove_by_index(gAllocator.allocations, i);

        gAllocator.total_bytes_allocated -= old_size;
        return;
    }

    Assert(false); // "Did not find allocation in Array"
#endif
}
|
||||
|
||||
// Allocates `new_size` bytes at `alignment` via the platform aligned-malloc
// macros, optionally zeroing the block (the aligned malloc family does not
// zero). The allocation is recorded in tracking builds.
void* GPAllocator_New (s64 new_size, s64 alignment, bool initialize) {
    // Fallback allocator: _aligned_malloc, which is MSVC's version of std::aligned_alloc
    void* memory = Aligned_Alloc(new_size, alignment);

    // _aligned_malloc does not zero memory, so we can zero it here
    if (memory != nullptr && initialize) {
        memset(memory, 0, new_size);
    }

    Add_Allocation(new_size, memory, (s32)alignment);
    return memory;
}
|
||||
|
||||
// Grows/shrinks an allocation in place where possible, preserving the first
// min(old_size, new_size) bytes (realloc semantics). New tail bytes are
// zeroed when `initialize` is set. Returns nullptr on failure, in which
// case the old block is still valid and still owned by the caller.
void* GPAllocator_Resize (s64 old_size, void* old_memory, s64 new_size, s64 alignment, bool initialize) {
    Assert((alignment % 8) == 0 && (alignment != 0));

    if (old_memory == nullptr) {
        return GPAllocator_New(new_size, alignment);
    }

    // Debug version: _aligned_realloc_dbg
    auto new_memory_address = Aligned_Realloc(old_size, old_memory, new_size, alignment);

    if (new_memory_address == nullptr) {
        // FIX: previously Remove_Allocation(old_memory) ran even on failure.
        // _aligned_realloc leaves the old block intact when it fails, so the
        // tracker was dropping a still-live allocation. Keep its entry.
        return nullptr;
    }

    if (initialize && new_size > old_size) {
        memset((u8*)new_memory_address + old_size, 0, new_size - old_size);
    }
    Remove_Allocation(old_memory);
    Add_Allocation(new_size, new_memory_address, (s32)alignment);
    return new_memory_address;
}
|
||||
|
||||
// Frees a block previously returned by GPAllocator_New/Resize.
// nullptr is a safe no-op.
void GPAllocator_Delete (void* memory) {
    if (memory == nullptr) return;
    // FIX: untrack BEFORE freeing. Once the block is freed, the allocator can
    // hand the same address to another thread, whose Add_Allocation could run
    // before our Remove_Allocation — which would then delete that thread's
    // fresh, live entry instead of ours.
    Remove_Allocation(memory);
    Aligned_Free(memory);
}
|
||||
|
||||
// Builds an Allocator handle dispatching to GPAllocator_Proc.
// The general-purpose allocator keeps no per-instance data, hence nullptr.
Allocator GPAllocator() {
    Allocator allocator = { GPAllocator_Proc, nullptr };
    return allocator;
}
|
||||
|
||||
// Allocator-interface entry point for the general-purpose allocator.
// Alignment is taken from the calling thread's context. Returns the new
// block for ALLOCATE/RESIZE, nullptr for DEALLOCATE (and unknown modes).
void* GPAllocator_Proc (Allocator_Mode mode, s64 requested_size, s64 old_size, void* old_memory, void* allocator_data) {
    u16 alignment = get_thread_context()->GPAllocator_alignment;
    switch (mode) {
        case Allocator_Mode::ALLOCATE: {
            return GPAllocator_New(requested_size, alignment);
        } break;
        case Allocator_Mode::RESIZE: {
            // FIX: removed the post-resize memcpy(result, old_memory, ...).
            // Both realloc paths (_aligned_realloc and the POSIX shim) already
            // preserve min(old_size, requested_size) bytes AND may free
            // old_memory — so the copy was redundant and read freed memory.
            return GPAllocator_Resize(old_size, old_memory, requested_size, alignment);
        } break;
        case Allocator_Mode::DEALLOCATE:
            GPAllocator_Delete(old_memory);
            break;
    }

    return nullptr;
}
|
||||
64
lib/Base/General_Purpose_Allocator.h
Normal file
64
lib/Base/General_Purpose_Allocator.h
Normal file
@ -0,0 +1,64 @@
|
||||
#pragma once
|
||||
|
||||
#include "Base.h"
|
||||
#include "Array.h"
|
||||
|
||||
#define GP_ALLOCATOR_TRACK_ALLOCATIONS BUILD_DEBUG
|
||||
#define GP_ALLOCATOR_VERY_DEBUG 0
|
||||
|
||||
#if COMPILER_MSVC
|
||||
#include <malloc.h> // _aligned_malloc, _aligned_realloc, _aligned_free (MSVC Only!)
|
||||
#if GP_ALLOCATOR_VERY_DEBUG
|
||||
#define Aligned_Alloc(sz, align) _aligned_malloc_dbg(sz, align, __FILE__, __LINE__)
|
||||
#define Aligned_Realloc(old_sz, ptr, sz, align) _aligned_realloc_dbg(ptr, sz, align, __FILE__, __LINE__)
|
||||
#define Aligned_Free(ptr) _aligned_free_dbg(ptr)
|
||||
#else
|
||||
#define Aligned_Alloc(sz, align) _aligned_malloc(sz, align)
|
||||
#define Aligned_Realloc(old_sz, ptr, sz, align) _aligned_realloc(ptr, sz, align)
|
||||
#define Aligned_Free(ptr) _aligned_free(ptr)
|
||||
#endif
|
||||
#else // Non-MSVC (POSIX / GCC / Clang)
|
||||
#include <cstdlib> // std::aligned_alloc
|
||||
#define Aligned_Alloc(sz, align) std::aligned_alloc(align, sz)
|
||||
#define Aligned_Realloc(old_sz, ptr, sz, align) gp_aligned_realloc(old_sz, ptr, sz, align)
|
||||
#define Aligned_Free(ptr) std::free(ptr)
|
||||
#endif
|
||||
|
||||
// One tracked live allocation. Aggregate-initialized in declaration order
// {size, memory, alignment} (see Add_Allocation) — do not reorder members.
struct Allocation {
    s64 size;       // Requested size in bytes.
    void* memory;   // Address returned by the aligned allocator.
    s32 alignment;  // Alignment the block was requested with.
};
|
||||
|
||||
// Global bookkeeping for the general-purpose allocator. Empty in
// non-tracking builds so the struct compiles away to nothing.
struct General_Allocator {
#if GP_ALLOCATOR_TRACK_ALLOCATIONS
    // NOTE: This is VERY slow, a hashmap is better suited here,
    // but this is just a quick and dirty solution for now.
    Array<Allocation> allocations;       // Linear table of live allocations.
    s64 total_bytes_allocated = 0;       // Sum of sizes of live allocations.
#endif
};
|
||||
|
||||
General_Allocator* get_general_allocator_data();
|
||||
|
||||
constexpr u16 GPAllocator_Default_Alignment = 16;
|
||||
|
||||
// #TODO: #REMOVE these:
|
||||
#define ALLOCATE(type) \
|
||||
(type*)GPAllocator_New(sizeof(type))
|
||||
|
||||
#define ALLOCATE_RAW_ARRAY(length, type) \
|
||||
(type*)GPAllocator_New(sizeof(type) * (length))
|
||||
|
||||
Allocator GPAllocator();
|
||||
|
||||
void* GPAllocator_Proc (Allocator_Mode mode, s64 requested_size, s64 old_size, void* old_memory, void* allocator_data);
|
||||
|
||||
void* GPAllocator_New (s64 new_size, s64 alignment=16, bool initialize=true);
|
||||
void* GPAllocator_Resize (s64 old_size, void* old_memory, s64 new_size, s64 alignment=16, bool initialize=true);
|
||||
void GPAllocator_Delete (void* memory);
|
||||
|
||||
bool GPAllocator_Is_This_Yours(void* old_memory);
|
||||
void GPAllocator_Initialize_Allocation_Tracker();
|
||||
bool GPAllocator_Tracking_Enabled();
|
||||
|
||||
82
lib/Base/String.cpp
Normal file
82
lib/Base/String.cpp
Normal file
@ -0,0 +1,82 @@
|
||||
// #TODO: Integrate Allocator / context.allocator

#include "Base_String.h"

#include "General_Purpose_Allocator.h"
#include <stdio.h>   // vsnprintf
#include <cstdarg>   // va_list, ...
#include <cstring>   // memcpy, memcmp, strlen
|
||||
|
||||
// True when the string has data and is null-terminated at index `count`.
// NOTE: reads one byte past the counted length — valid for buffers
// allocated with the +1 terminator slot (see copy_string).
bool is_c_string(string n_string) {
    if (n_string.data == nullptr) return false;
    return n_string.data[n_string.count] == '\0';
}
|
||||
|
||||
// A string is valid when it points at data and has a positive length.
bool is_valid(string n_string) {
    bool has_data   = (n_string.data != nullptr);
    bool has_length = (n_string.count > 0);
    return has_data && has_length;
}
|
||||
|
||||
// printf-style formatting into a freshly allocated 1024-byte buffer.
// The result is null-terminated; the caller owns the buffer (see free(string&)).
string format_string(char* format, ...) {
    constexpr s64 BUFFER_SIZE = 1024;

    string str = {0};

    str.data = ALLOCATE_RAW_ARRAY(BUFFER_SIZE, u8);

    va_list args;
    va_start(args, format);
    // Note that this *is* null-terminated for compatibility.
    s64 written = (s64)vsnprintf((char*)str.data, (size_t)BUFFER_SIZE, format, args);
    va_end(args);

    if (written < 0) {
        // Encoding error: return an empty (but allocated, terminated) string
        // instead of a negative count.
        written = 0;
    } else if (written > BUFFER_SIZE - 1) {
        // FIX: vsnprintf returns the UNtruncated length; previously count
        // could exceed the bytes actually written to the buffer.
        written = BUFFER_SIZE - 1;
    }
    str.count = written;

    return str;
}
|
||||
|
||||
// Deep-copies a string into a new heap buffer. The copy gets an explicit
// null terminator (count + 1 bytes allocated) so it is also a valid C string.
string copy_string(string original_string) {
    string str = {0};

    str.data = ALLOCATE_RAW_ARRAY(original_string.count + 1, u8);
    if (original_string.count > 0) {
        // FIX: guard the memcpy — calling it with a null source pointer is UB
        // even when the byte count is zero.
        memcpy(str.data, original_string.data, original_string.count);
    }
    str.data[original_string.count] = '\0'; // explicit, not just zero-init
    str.count = original_string.count;

    return str;
}
|
||||
|
||||
// Deep-copies a C string into a new heap buffer (terminator included).
// nullptr input yields an empty string instead of strlen(nullptr) UB.
string copy_string(char* c_string) {
    string str = {0};
    if (c_string == nullptr) {
        return str; // FIX: previously strlen(nullptr) — undefined behavior.
    }
    s64 string_length = (s64)strlen(c_string);

    str.data = ALLOCATE_RAW_ARRAY(string_length + 1, u8);
    memcpy(str.data, c_string, string_length + 1); // copy terminator too
    str.count = string_length;

    return str;
}
|
||||
|
||||
// Byte-wise equality of two strings. Two empty strings match regardless
// of their data pointers.
bool strings_match(string first_string, string second_string) {
    if (first_string.count != second_string.count) {
        return false;
    }
    if (first_string.count == 0) {
        // Both empty — match, and avoids memcmp on possibly-null data.
        return true;
    }
    // memcmp replaces the hand-rolled byte loop; identical result.
    return memcmp(first_string.data, second_string.data, (size_t)first_string.count) == 0;
}
|
||||
|
||||
// Wraps a null-terminated literal as a non-owning string view.
// The bytes are NOT copied — do not pass the result to free(string&).
string string_from_literal(char* literal) {
    string view;
    view.data = (u8*)literal;
    view.count = (s64)strlen(literal);
    return view;
}
|
||||
|
||||
// Releases a heap-owned string buffer and resets the string to empty.
// Safe on an already-empty string (deleting nullptr is a no-op).
void free(string& n_string) {
    // #TODO: Get context allocator?
    GPAllocator_Delete(n_string.data);
    n_string.count = 0;
    n_string.data = nullptr;
}
|
||||
16
lib/Base/Timing.cpp
Normal file
16
lib/Base/Timing.cpp
Normal file
@ -0,0 +1,16 @@
|
||||
#include "Timing.h"
|
||||
|
||||
|
||||
#if OS_WINDOWS
|
||||
|
||||
// Current wall-clock time as Unix-epoch seconds (f64), using the
// high-resolution Windows system clock.
f64 GetUnixTimestamp() {
    // Offset between the FILETIME epoch (1601-01-01) and the Unix epoch
    // (1970-01-01), expressed in 100-nanosecond ticks.
    constexpr s64 FILETIME_TO_UNIX = 116444736000000000i64;
    FILETIME fileTime;
    GetSystemTimePreciseAsFileTime(&fileTime);
    // Reassemble the 64-bit tick count from the two 32-bit FILETIME halves.
    s64 ticks = ((s64)fileTime.dwHighDateTime << (s64)32) | (s64)fileTime.dwLowDateTime;
    // 10,000,000 ticks (100ns each) per second.
    return (ticks - FILETIME_TO_UNIX) / (10.0 * 1000.0 * 1000.0);
}
|
||||
|
||||
// #TODO: GetUnixTimestampNanoseconds
|
||||
|
||||
#endif
|
||||
5
lib/Base/Timing.h
Normal file
5
lib/Base/Timing.h
Normal file
@ -0,0 +1,5 @@
|
||||
#pragma once
|
||||
|
||||
#include "Base.h"
|
||||
|
||||
f64 GetUnixTimestamp();
|
||||
58
lib/Base/error-codes.h
Normal file
58
lib/Base/error-codes.h
Normal file
@ -0,0 +1,58 @@
|
||||
#pragma once
|
||||
|
||||
#include "Base.h"
|
||||
|
||||
// How serious an error is; stored in Native_Error.severity.
enum ErrorSeverity: s32 {
    SEVERITY_WARNING   = 0, // Informational; execution continues.
    SEVERITY_NON_FATAL = 1, // Recoverable error.
    SEVERITY_FATAL     = 2  // Program cannot continue (see New_Fatal_Error).
};
|
||||
|
||||
#include "Base_String.h"
|
||||
// typedef struct string Native_Error;
|
||||
// Note: Native_Error should down-cast to a string.
|
||||
// Error carrying its message as {count, data} — the leading members mirror
// `string` so a Native_Error* can be read as a string (the "down-cast"
// noted above). Do not reorder members.
struct Native_Error {
    s64 count;   // Message length in bytes.
    u8* data;    // Message bytes (see New_*_Internal constructors).
    ErrorSeverity severity = SEVERITY_WARNING;
};
|
||||
|
||||
#define Null_Pointer_Check(arg) \
|
||||
if (arg == nullptr) { \
|
||||
return New_Fatal_Error_Internal("%s:%d\n[%s] Error: %s is a null pointer.", __FILE__, __LINE__, __FUNCTION__, Stringify(arg)); \
|
||||
}
|
||||
|
||||
#define Array_Check(arg) \
|
||||
if (!is_valid(arg)) { \
|
||||
return New_Fatal_Error_Internal("%s:%d\n[%s] Error: %s is not a valid array.", __FILE__, __LINE__, __FUNCTION__, Stringify(arg)); \
|
||||
}
|
||||
|
||||
#define String_Check(arg) \
|
||||
if (!Is_Valid(arg)) { return New_Fatal_Error_Internal("%s:%d\n[%s] Error: %s is not a valid string.", __FILE__, __LINE__, __FUNCTION__, Stringify(arg)); }
|
||||
|
||||
#define Error_Check(error) \
|
||||
if (error != nullptr) { \
|
||||
return error; \
|
||||
}
|
||||
|
||||
// An error from which the program cannot continue (e.g. a segmentation fault)
|
||||
#define New_Fatal_Error(message) \
|
||||
New_Fatal_Error_Internal("%s:%d\n[%s] Error: %s.", __FILE__, __LINE__, __FUNCTION__, message)
|
||||
|
||||
#define New_Error(message) \
|
||||
New_Error_Internal("%s:%d\n[%s] Error: %s.", __FILE__, __LINE__, __FUNCTION__, message)
|
||||
|
||||
#define New_Warning(message) \
|
||||
New_Warning_Internal("%s:%d\n[%s] Warning: %s.", __FILE__, __LINE__, __FUNCTION__, message)
|
||||
|
||||
Native_Error* New_Fatal_Error_Internal(char* raw_message, ...);
|
||||
|
||||
Native_Error* New_Error_Internal(char* raw_message, ...);
|
||||
|
||||
Native_Error* New_Warning_Internal(char* raw_message, ...);
|
||||
|
||||
Native_Error* Native_Error_Callstack(Native_Error* new_error, Native_Error* old_error, ErrorSeverity severity);
|
||||
|
||||
PROTOTYPING_API C_API Native_Error* Cleanup_Error(Native_Error* error);
|
||||
|
||||
PROTOTYPING_API C_API Native_Error* Native_Error_Test();
|
||||
16
lib/Base/meta_generated.h
Normal file
16
lib/Base/meta_generated.h
Normal file
@ -0,0 +1,16 @@
|
||||
#pragma once
|
||||
|
||||
constexpr const char* MUSA_LIB_VERSION = "0.1a";
|
||||
#define BUILD_DEBUG 1
|
||||
#define OS_WINDOWS 1
|
||||
#define OS_LINUX 0
|
||||
#define OS_MACOS 0
|
||||
#define OS_ANDROID 0
|
||||
#define OS_IOS 0
|
||||
#define ARCH_CPU_X64 1
|
||||
#define ARCH_CPU_ARM64 0
|
||||
#define OS_IS_UNIX 0
|
||||
#define COMPILER_MSVC 1
|
||||
#define COMPILER_CLANG 0
|
||||
#define ARRAY_ENABLE_BOUNDS_CHECKING 1
|
||||
#define COMPILER_GCC 0
|
||||
9
lib/api/lib_api.h
Normal file
9
lib/api/lib_api.h
Normal file
@ -0,0 +1,9 @@
|
||||
#pragma once
|
||||
|
||||
#include "Base.h"
|
||||
#include "error-codes.h"
|
||||
|
||||
#include "General_Purpose_Allocator.h"
|
||||
#include "Array.h"
|
||||
#include "Basic.h"
|
||||
|
||||
41
src/OS_Win32.cpp
Normal file
41
src/OS_Win32.cpp
Normal file
@ -0,0 +1,41 @@
|
||||
// Move into src/Win32.cpp
|
||||
|
||||
// #MOVE TO Base_Entry_Point.cpp
|
||||
// #MOVE TO Base_Entry_Point.cpp
// One-time process bootstrap for the main thread. Order matters: the
// allocation tracker must exist before any GP allocation, and the arena
// table must exist before next_arena() can hand out arenas.
internal void Bootstrap_Entry_Point () {
    // 0. Setup general purpose allocator
    GPAllocator_Initialize_Allocation_Tracker();
    // 1. Setup arena table
    initialize_arena_table();
    // 2. Setup thread local context
    // NOTE(review): the context is allocated FROM `arena`, so it lives as
    // long as the arena itself — confirm that is the intended lifetime.
    Arena* arena = next_arena(Arena_Reserve::Size_64G);
    thread_local_context = New<Thread_Context>(get_allocator(arena));
    thread_local_context->temp = next_arena(Arena_Reserve::Size_64G);
    thread_local_context->arena = arena;
    thread_local_context->allocator = get_allocator(arena);
    thread_local_context->thread_idx = 0; // main thread is index 0
    thread_local_context->thread_name = "Main Thread";
    // thread_local_context.logger = init_logger();

    // push_allocator(GPAllocator());
    // auto something = New<Thread_Context>();
    // auto something2 = New<Array<s64>>();

}
|
||||
|
||||
// #MOVE TO Base_Entry_Point.cpp
|
||||
// #MOVE TO Base_Entry_Point.cpp
// Platform-independent program body, called after bootstrap.
// Currently a stub: traps into the debugger and returns.
internal void Main_Entry_Point (int argc, WCHAR **argv) {
    debug_break();

    // #TODO: Run Tests?
}
|
||||
|
||||
// Windows entry shim: bootstraps process state, then hands off to the
// platform-independent Main_Entry_Point. Called from wmain/wWinMain.
internal void Win32_Entry_Point (int argc, WCHAR **argv) {
    // See: w32_entry_point_caller(); (raddebugger)
    // [ ] SetUnhandledExceptionFilter
    // [ ] Setup Thread Context:
    Bootstrap_Entry_Point();

    printf("Hello there!\n\n");
    // See: main_thread_base_entry_point
    Main_Entry_Point(argc, argv);
}
|
||||
21
unity_build_exe.cpp
Normal file
21
unity_build_exe.cpp
Normal file
@ -0,0 +1,21 @@
|
||||
#include "unity_build_lib.cpp"
|
||||
#include "src/OS_Win32.cpp"
|
||||
// #include "src/OS_Linux.cpp"
|
||||
// #include "src/OS_MacOS.cpp"
|
||||
|
||||
// #include "imgui-docking.cpp"
|
||||
|
||||
|
||||
#if OS_WINDOWS
|
||||
#if BUILD_CONSOLE_INTERFACE
|
||||
// Console-subsystem entry point (BUILD_CONSOLE_INTERFACE): wide-char argv
// straight from the CRT, forwarded to the shared Win32 entry shim.
int wmain(int argc, WCHAR **argv) {
    Win32_Entry_Point(argc, argv);
    return 0;
}
|
||||
#else
|
||||
// GUI-subsystem entry point: no argc/argv parameters here, so the CRT
// globals __argc/__wargv supply the wide command line instead.
int wWinMain(HINSTANCE hInstance, HINSTANCE hPrevInstance, LPWSTR lpCmdLine, int nShowCmd) {
    Win32_Entry_Point(__argc, __wargv);
    return 0;
}
|
||||
#endif
|
||||
#endif
|
||||
10
unity_build_lib.cpp
Normal file
10
unity_build_lib.cpp
Normal file
@ -0,0 +1,10 @@
|
||||
#include "lib/Base/Base.cpp"
|
||||
#include "lib/Base/Arena.cpp"
|
||||
#include "lib/Base/Arena_Table.cpp"
|
||||
#include "lib/Base/Base_Thread_Context.cpp"
|
||||
#include "lib/Base/General_Purpose_Allocator.cpp"
|
||||
#include "lib/Base/Allocator.cpp"
|
||||
#include "lib/Base/Basic.cpp"
|
||||
#include "lib/Base/ErrorCodes.cpp"
|
||||
#include "lib/Base/String.cpp"
|
||||
#include "lib/Base/Timing.cpp"
|
||||
Loading…
Reference in New Issue
Block a user