#include "Basic.h"

#include <cmath>
#include <math.h>   // isnan, floor
#include <stdlib.h> // qsort
#include <cassert>  // assert

Native_Error* Basic_Difference2 (ArrayView<f64> input, ArrayView<f64>& output) {
    Array_Check(input);
    Array_Check(output);

    // Ensure the output has room for the input.count - 1 successive differences.
    Assert(output.count >= input.count - 1);

    ForUpTo(i, input.count - 1) {
        output[i] = input[i + 1] - input[i];
    }

    output.count = input.count - 1;

    return nullptr;
}

Native_Error* Basic_Mean2 (ArrayView<f64> input, f64* mean) {
    Array_Check(input);
    Null_Pointer_Check(mean);

    f64 sum = 0;
    ForArray(i, input) {
        sum += input[i];
    }

    (*mean) = (sum / (f64)input.count);

    return nullptr;
}

Native_Error* Basic_QuickSortInPlace (ArrayView<f64> input) {
    Array_Check(input);

    qsort(input.data, input.count, sizeof(f64), qsort_doubles_comparator_nonnan);

    return nullptr;
}

Native_Error* Basic_Median2 (ArrayView<f64> unsorted_input, f64* median) {
    Array_Check(unsorted_input);
    Null_Pointer_Check(median);

    auto input_sorted = array_copy(unsorted_input);
    qsort(input_sorted.data, (u64)input_sorted.count, sizeof(f64), qsort_doubles_comparator_nonnan);

    s64 middle_element_index = unsorted_input.count / 2;

    if (unsorted_input.count % 2 == 1) {
        (*median) = input_sorted[middle_element_index];
    } else {
        (*median) = (input_sorted[middle_element_index - 1] + input_sorted[middle_element_index]) / 2.0;
    }

    array_free(input_sorted);

    return nullptr;
}

Native_Error* Basic_RescaleInPlace (ArrayView<f64> input, double min, double max) {
    Array_Check(input);
    if (max <= min) { return New_Error("`max` must be greater than `min`!"); }

    f64 smallest_element; f64 largest_element;
    auto error = Basic_Min2(input, &smallest_element);
    if (error != nullptr) return error;

    error = Basic_Max2(input, &largest_element);
    if (error != nullptr) return error;

    // All elements are equal; leave them untouched rather than divide by zero.
    if (largest_element == smallest_element)
        return nullptr;

    ForArray(i, input) {
        input[i] = (input[i] - smallest_element) / (largest_element - smallest_element) * (max - min) + min;
    }

    return nullptr;
}
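
// A quick worked example of the affine rescale above (illustrative only, not
// executed anywhere): mapping the data {2, 4, 6} onto [0, 1] gives
//   (2 - 2) / (6 - 2) * (1 - 0) + 0 = 0.0
//   (4 - 2) / (6 - 2) * (1 - 0) + 0 = 0.5
//   (6 - 2) / (6 - 2) * (1 - 0) + 0 = 1.0
// i.e. each value maps to (value - data_min) / (data_max - data_min) * (max - min) + min.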

Native_Error* Basic_Min2 (ArrayView<f64> input, f64* min_out) {
    Array_Check(input);
    Null_Pointer_Check(min_out);

    f64 min = input[0];
    ForArrayStartingAt(i, input, 1) {
        if (input[i] < min) {
            min = input[i];
        }
    }

    (*min_out) = min;
    return nullptr;
}

Native_Error* Basic_Max2 (ArrayView<f64> input, f64* max_out) {
    Array_Check(input);
    Null_Pointer_Check(max_out);

    f64 max = input[0];
    ForArrayStartingAt(i, input, 1) {
        if (input[i] > max) {
            max = input[i];
        }
    }

    (*max_out) = max;
    return nullptr;
}

double Basic_Max (double input1, double input2) {
    if (input1 > input2) return input1;
    else return input2;
}

bool Basic_Is_Positive_Real (f32 input) {
    return (!(input <= 0.0 || isnan(input) || isinf(input)));
}

bool Basic_Is_Positive_Real (f64 input) {
    return (!(input <= 0.0 || isnan(input) || isinf(input)));
}

Native_Error* Basic_Standard_Deviation2 (ArrayView<f64> input, f64* stddev) {
    Array_Check(input);
    Null_Pointer_Check(stddev);

    f64 mean = 0.0;
    auto error = Basic_Mean2(input, &mean);
    if (error != nullptr) return error;

    f64 sum_of_squared_differences = 0;
    ForArray(i, input) {
        sum_of_squared_differences += (input[i] - mean) * (input[i] - mean);
    }

    // Population standard deviation: divide by the full element count.
    (*stddev) = sqrt(sum_of_squared_differences / (f64)input.count);

    return nullptr;
}

Native_Error* Basic_Variance2 (ArrayView<f64> input, f64* variance) {
    Array_Check(input);
    Null_Pointer_Check(variance);

    f64 mean = 0.0;
    auto error = Basic_Mean2(input, &mean);
    if (error != nullptr) return error;

    f64 sum_of_squared_differences = 0;
    ForArray(i, input) {
        sum_of_squared_differences += (input[i] - mean) * (input[i] - mean);
    }

    // Sample variance (Bessel's correction); flip to false for the population form.
    bool sample = true;

    (*variance) = (sum_of_squared_differences / (f64)(sample ? (input.count - 1) : input.count));
    return nullptr;
}

Native_Error* Basic_Root_Mean_Squared2 (ArrayView<f64> input, f64* rms) {
    Array_Check(input);
    Null_Pointer_Check(rms);

    f64 square = 0;
    ForArray(i, input) {
        square += pow(input[i], 2);
    }
    f64 mean = (square / ((f64)input.count));

    (*rms) = sqrt(mean);
    return nullptr;
}

Native_Error* Basic_IndexSort2 (ArrayView<f64> input, ArrayView<s64> output) {
    Array_Check(input);
    Array_Check(output);
    if (output.count < input.count) { return New_Error("`output.count` must be at least `input.count`!"); }

    // Insertion sort on indices: orders `output` so that the referenced input
    // values are in descending order (index of the largest value first).
    ForArray(i, input) { output[i] = i; }
    ForArray(i, input) {
        for (s64 j = i; j > 0; j -= 1) {
            if (input[output[j]] > input[output[j-1]]) {
                s64 temp = output[j];
                output[j] = output[j - 1];
                output[j - 1] = temp;
            } else {
                break; // The prefix is already ordered; nothing left to bubble up.
            }
        }
    }

    return nullptr;
}
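
// Illustrative example (not executed): for input = {3.0, 1.0, 2.0},
// Basic_IndexSort2 fills output with {0, 2, 1}, i.e. indices ordered so that
// input[output[0]] >= input[output[1]] >= input[output[2]].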

Native_Error* Basic_Count_Non_Nan2 (ArrayView<f64> input, s64* non_nan_count) {
    Array_Check(input);
    Null_Pointer_Check(non_nan_count);

    s64 count = 0;

    ForArray(i, input) {
        if (!isnan(input[i])) {
            count += 1;
        }
    }

    (*non_nan_count) = count;
    return nullptr;
}

Native_Error* Basic_Calculate_Percentile_New (ArrayView<f64> input, f64 percentile, f64* percentile_value_out) {
    Array_Check(input);
    Null_Pointer_Check(percentile_value_out);

    Assert(percentile >= 0.0 && percentile <= 1.0);

    // Note: the input is sorted in place; NaN values are pushed to the back.
    qsort(input.data, input.count, sizeof(f64), qsort_doubles_comparator);
    s64 non_nan_count = 0;
    auto error = Basic_Count_Non_Nan2(input, &non_nan_count);
    if (error != nullptr) return error;

    if (non_nan_count == 0) {
        (*percentile_value_out) = NAN;
        return New_Warning("All values in the input array are `NAN`!");
    }

    auto r = percentile * non_nan_count;
    auto k = floor(r + 0.5);

    auto kp1 = k + 1;

    // Ratio between the K and K+1 rows:
    r = r - k;

    // Find indices that are out of the range 1 to n and cap them:
    if (k < 1 || isnan(k)) {
        k = 1;
    }

    // kp1 = min( kp1, n );
    if (non_nan_count < kp1) { kp1 = (f64)non_nan_count; }

    // Use simple linear interpolation for the valid percentages:
    // y = (0.5+r).*x(kp1,:)+(0.5-r).*x(k,:); // yuck.
    s64 kp1_i = static_cast<s64>(kp1);
    s64 k_i = static_cast<s64>(k);

    f64 y_first_part = (0.5 + r) * input[kp1_i - 1];
    f64 y_second_part = (0.5 - r) * input[k_i - 1];
    auto y = y_first_part + y_second_part;

    // Make sure that values we hit exactly are copied rather than interpolated:
    if (r == -0.5) {
        (*percentile_value_out) = input[k_i - 1];
        return nullptr;
    }

    // Make sure that identical values are copied rather than interpolated:
    if (input[k_i - 1] == input[kp1_i - 1]) {
        (*percentile_value_out) = input[k_i - 1];
        return nullptr;
    }

    (*percentile_value_out) = y;

    return nullptr;
}
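
// Worked example of the interpolation above (illustrative only): for the sorted
// input {1, 2, 3, 4} and percentile 0.5, n = 4, so r = 2, k = floor(2.5) = 2,
// kp1 = 3 and the residual r - k = 0; the result is
//   (0.5) * input[2] + (0.5) * input[1] = 0.5*3 + 0.5*2 = 2.5,
// i.e. the usual midpoint definition of the median for an even-length array.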

Native_Error* Basic_ReverseArrayInPlace (ArrayView<f64> input) {
    Array_Check(input);
    ForUpTo(i, input.count/2) {
        f64 temp = input[i];
        input[i] = input[input.count - i - 1];
        input[input.count - i - 1] = temp;
    }

    return nullptr;
}

// Native_Error* Basic_Reverse_Array (int* input, int input_length) {
//     for (int i = 0; i < input_length / 2; i++) {
//         // Swap the ith and (input_length - i - 1)th elements
//         int temp = input[i];
//         input[i] = input[input_length - i - 1];
//         input[input_length - i - 1] = temp;
//     }
//
//     return nullptr;
// }

// Native_Error* Basic_Reverse_Array (double* input, int input_length) {
//     for (int i = 0; i < input_length / 2; i++) {
//         // Swap the ith and (input_length - i - 1)th elements
//         double temp = input[i];
//         input[i] = input[input_length - i - 1];
//         input[input_length - i - 1] = temp;
//     }
//
//     return nullptr;
// }

// #TODO: This should be for NDArray or 2DArray. idk.
Native_Error* Basic_2DArrayInvertMemoryOrder (ArrayView<f64> input, s64 first_dimension, s64 second_dimension, ArrayView<f64> output) {
    Array_Check(input);
    Array_Check(output);
    if (output.count < input.count) { return New_Error("`input.count` should not exceed `output.count`!"); }
    Assert(first_dimension * second_dimension == input.count);
    Assert(input.count == output.count);

    ForUpTo(i, first_dimension) {
        ForUpTo(j, second_dimension) {
            output[j + second_dimension * i] = input[i + first_dimension * j];
        }
    }

    return nullptr;
}
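
// Illustrative example (not executed): with first_dimension = 2 and
// second_dimension = 3, a column-major input {a00, a10, a01, a11, a02, a12}
// comes out as the row-major output {a00, a01, a02, a10, a11, a12}; element
// (i, j) moves from input[i + first_dimension*j] to output[j + second_dimension*i].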

bool sort_doubles_comparator(double a, double b) {
    if (isnan(a)) return false; // NaN values are considered greater
    if (isnan(b)) return true;  // Non-NaN values are considered smaller
    return a < b;               // Normal comparison for non-NaN values
}

int qsort_doubles_comparator_nonnan(const void* a, const void* b) {
    double val1 = (*(const double*)a);
    double val2 = (*(const double*)b);

    if (val1 < val2) return -1;
    if (val1 > val2) return 1;

    return 0;
}

int qsort_doubles_comparator(const void* a, const void* b) {
    double val1 = (*(const double*)a);
    double val2 = (*(const double*)b);

    if (isnan(val1) && isnan(val2)) return 0; // Treat two NaNs as equal so the ordering stays consistent
    if (isnan(val1)) return 1;  // NaN values are considered greater
    if (isnan(val2)) return -1; // Non-NaN values are considered smaller

    if (val1 < val2) return -1;
    if (val1 > val2) return 1;

    return 0;
}
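
// Minimal usage sketch for the comparators above (kept out of the build; assumes
// only that the `f64` alias from Basic.h and <stdlib.h> qsort are available):
// sorting with qsort_doubles_comparator pushes every NaN to the back of the array.
#if 0
static void example_sort_with_nans () {
    f64 values[5] = { 3.0, NAN, 1.0, NAN, 2.0 };
    qsort(values, 5, sizeof(f64), qsort_doubles_comparator);
    // values is now { 1.0, 2.0, 3.0, NAN, NAN }.
}
#endif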

Native_Error* Basic_CalculatePercentileNoSort (ArrayView<f64> input, f64 percentile, f64* percentile_value_out) {
    Array_Check(input);
    Null_Pointer_Check(percentile_value_out);

    Assert(percentile >= 0.0 && percentile <= 1.0);

    // Precondition: `input` is already sorted ascending and contains no NaN values,
    // so every element counts as valid.
    s64 non_nan_count = input.count;

    auto r = percentile * non_nan_count;
    auto k = floor(r + 0.5);

    auto kp1 = k + 1;

    // Ratio between the K and K+1 rows:
    r = r - k;

    // Find indices that are out of the range 1 to n and cap them:
    if (k < 1 || isnan(k)) {
        k = 1;
    }

    // kp1 = min( kp1, n );
    if (non_nan_count < kp1) { kp1 = (f64)non_nan_count; }

    // Use simple linear interpolation for the valid percentages:
    // y = (0.5+r).*x(kp1,:)+(0.5-r).*x(k,:); // yuck.
    s64 kp1_i = static_cast<s64>(kp1);
    s64 k_i = static_cast<s64>(k);

    f64 y_first_part = (0.5 + r) * input[kp1_i - 1];
    f64 y_second_part = (0.5 - r) * input[k_i - 1];
    auto y = y_first_part + y_second_part;

    // Make sure that values we hit exactly are copied rather than interpolated:
    if (r == -0.5) {
        (*percentile_value_out) = input[k_i - 1];
        return nullptr;
    }

    // Make sure that identical values are copied rather than interpolated:
    if (input[k_i - 1] == input[kp1_i - 1]) {
        (*percentile_value_out) = input[k_i - 1];
        return nullptr;
    }

    (*percentile_value_out) = y;

    return nullptr;
}

Native_Error* Basic_Replace_Outliers2 (ArrayView<f64> input, f64 outlier_threshold) {
    Array_Check(input);
    Assert(outlier_threshold > 0);

    auto input_copy = array_copy(input);

    qsort(input_copy.data, input_copy.count, sizeof(f64), qsort_doubles_comparator_nonnan);
    f64 Q1 = 0.0;
    f64 Q3 = 0.0;

    auto error = Basic_CalculatePercentileNoSort(input_copy, 0.25, &Q1);
    if (error == nullptr) error = Basic_CalculatePercentileNoSort(input_copy, 0.75, &Q3);
    if (error != nullptr) { array_free(input_copy); return error; }

    f64 IQR = Q3 - Q1;
    f64 iqr_outlier_threshold = IQR * outlier_threshold;

    // Identify points below Q1 - outlier_threshold, and above Q3 + outlier_threshold
    auto low_threshold = Q1 - iqr_outlier_threshold;
    auto high_threshold = Q3 + iqr_outlier_threshold;

    // Start at index 1: each outlier is replaced with the previous (already filtered)
    // sample, so the first element is never rewritten.
    ForArrayStartingAt(i, input, 1) {
        if (input[i] < low_threshold || input[i] > high_threshold) {
            input[i] = input[i-1];
        }
    }

    array_free(input_copy);

    return nullptr;
}
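
// Worked example of the IQR rule above (illustrative only): if Q1 = 2.0,
// Q3 = 6.0 and outlier_threshold = 1.5, then IQR = 4.0 and the accepted range
// is [2.0 - 6.0, 6.0 + 6.0] = [-4.0, 12.0]; any sample outside that range is
// overwritten with the previous sample.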

Native_Error* Basic_Replace_Values_Beyond_Threshold2 (ArrayView<f64> input, f64 low_threshold, f64 high_threshold, f64 replacement_value) {
    Array_Check(input);

    ForArray(i, input) {
        if (input[i] < low_threshold || input[i] > high_threshold) {
            input[i] = replacement_value;
        }
    }

    return nullptr;
}

/* // #TODO: Replace with version that doesn't use Eigen
Native_Error* Basic_Roots_To_Polynomials2 (ArrayView<f64> roots, ArrayView<f64> polynomials) {
    Array_Check(roots);

    s64 root_count = roots.count;

    if (root_count == 0) { return New_Error("`roots.count` is zero!"); }
    if (polynomials.count < root_count + 1) {
        return New_Error("`polynomials.count` should be roots.count + 1!");
    }

    // For real roots
    Eigen::VectorXd roots_vec = Eigen::Map<Eigen::VectorXd>(roots.data, root_count);
    // c = [1 zeros(1,n,class(x))];
    Eigen::VectorXd c = Eigen::VectorXd::Zero(root_count + 1);
    c[0] = 1.0;
    // for j = 1:n
    // c[1] = c[1] - roots_vec[0] * c[0]; // Extract first index
    ForArray(i, roots) {
        // c(2:(j+1)) = c(2:(j+1)) - e(j).*c(1:j);
        Eigen::VectorXd val_temp = c.segment(1, i + 1) - roots_vec[i] * c.segment(0, i + 1);
        c.segment(1, i + 1) = val_temp;
    }
    // The result should be real if the roots are complex conjugates.
    memcpy(polynomials.data, c.data(), (root_count + 1) * sizeof(f64));

    return nullptr;
}
*/

// exp(z) for z = real + i*imag, via Euler's formula: e^real * (cos(imag) + i*sin(imag)).
Complex exponential (Complex cx) {
    f64 e = std::exp(cx.real);
    return Complex(e * std::cos(cx.imag), e * std::sin(cx.imag));
}

Complex conjugate (Complex cx) {
    return Complex(cx.real, -cx.imag);
}

// Magnitude (absolute value) of a complex number.
f64 fabs(Complex cx) {
    return sqrt(cx.real * cx.real + cx.imag * cx.imag);
}
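
// Sanity check for the complex helpers above (illustrative only): by Euler's
// formula, exponential(Complex(0.0, 3.14159265358979)) is approximately
// Complex(-1.0, 0.0), and fabs of that value is approximately 1.0.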