Janus 2.0.0
High-performance C++20 dual-mode numerical framework
Loading...
Searching...
No Matches
SurrogateModel.hpp
Go to the documentation of this file.
#pragma once
#include "janus/math/Logic.hpp"
#include "janus/math/Trig.hpp"

#include <Eigen/Dense>

#include <algorithm>
#include <cmath>
#include <concepts>
#include <numeric>
#include <string>
#include <vector>

namespace janus {

21// ======================================================================
22// Softmax (Smooth Maximum / LogSumExp)
23// ======================================================================
24
38template <typename T> auto softmax(const std::vector<T> &args, double softness = 1.0) {
39 if (args.empty()) {
40 throw InvalidArgument("softmax: requires at least one value");
41 }
42 if (softness <= 0.0) {
43 throw InvalidArgument("softmax: softness must be positive");
44 }
45
46 // 1. Find max element-wise
47 auto max_val = args[0];
48 for (size_t i = 1; i < args.size(); ++i) {
49 max_val = janus::max(max_val, args[i]);
50 }
51
52 // 2. Compute sum of exponentials: sum(exp((x - max) / softness))
53 // We need a way to initialize the sum with the correct type and dimensions.
54 // We can compute the first term to initialize.
55
56 // Using auto type deduction for the result of exp operation
57 auto first_term = janus::exp((args[0] - max_val) / softness);
58 auto sum_exp = first_term;
59
60 for (size_t i = 1; i < args.size(); ++i) {
61 sum_exp = sum_exp + janus::exp((args[i] - max_val) / softness);
62 }
63
64 // 3. Final computation
65 return max_val + softness * janus::log(sum_exp);
66}
67
68// Convenience overload for 2 arguments
69template <typename T1, typename T2> auto softmax(const T1 &a, const T2 &b, double softness = 1.0) {
70 // We need to construct a vector, but T1 and T2 might be different types (double vs Matrix)
71 // This is tricky if types are different.
72 // Ideally we assume they are compatible or castable to a common type.
73 // For simplicity in the generic case, let's implement the 2-arg logic directly to allow
74 // standard promotions.
75
76 auto max_val = janus::max(a, b);
77
78 // Compute exp terms
79 // exp((a - max) / softness) + exp((b - max) / softness)
80 // Note: One of the exponents will include (max - max) = 0, so exp(0) = 1.
81 // But we just compute blindly for simplicity and consistency.
82
83 auto term1 = janus::exp((a - max_val) / softness);
84 auto term2 = janus::exp((b - max_val) / softness);
85
86 // Check if we need to broadcast scalar to matrix if one is matrix and other is scalar
87 // janus::exp usually handles this if implemented correctly or if using Eigen arrays.
88 // However, addition `term1 + term2` might fail if one is scalar and other is matrix in standard
89 // Eigen without array(). We assume janus::exp returns compatible types (Eigen matrix or
90 // scalar). If mixed scalar/matrix, standard Eigen requires array operation or broadcasting.
91
92 // Let's rely on janus::exp returning something compatible with operator+
93 return max_val + softness * janus::log(term1 + term2);
94}
95
96// ======================================================================
97// Softmin (Smooth Minimum)
98// ======================================================================
99
108template <typename T> auto softmin(const std::vector<T> &args, double softness = 1.0) {
109 std::vector<T> neg_args;
110 neg_args.reserve(args.size());
111 for (const auto &arg : args) {
112 neg_args.push_back(-arg);
113 }
114 return -softmax(neg_args, softness);
115}
116
117// Convenience overload for 2 arguments
118template <typename T1, typename T2> auto softmin(const T1 &a, const T2 &b, double softness = 1.0) {
119 return -softmax(-a, -b, softness);
120}
121
122// ======================================================================
123// Softplus (Smooth ReLU)
124// ======================================================================
125
134namespace detail {
135inline void validate_hardness(double hardness, const char *function_name) {
136 if (hardness <= 0.0) {
137 throw InvalidArgument(std::string(function_name) + ": hardness must be positive");
138 }
139}
140
141inline void validate_rho(double rho) {
142 if (rho <= 0.0) {
143 throw InvalidArgument("ks_max: rho must be positive");
144 }
145}
146
147template <std::floating_point T> auto softplus_scalar(const T &x, double beta) {
148 const auto bx = beta * x;
149 const auto shift = bx > 0.0 ? bx : 0.0;
150 return (shift + std::log(std::exp(-shift) + std::exp(bx - shift))) / beta;
151}
152
153inline auto softplus_scalar(const SymbolicScalar &x, double beta) {
154 const auto bx = beta * x;
155 // CasADi's logsumexp applies max-shift stabilization internally:
156 // logsumexp(a) = max(a) + log(sum(exp(a - max(a))))
157 return SymbolicScalar::logsumexp(SymbolicScalar::vertcat({SymbolicScalar(0.0), bx})) / beta;
158}
159} // namespace detail
160
161template <JanusScalar T> auto softplus(const T &x, double beta = 1.0, double /*threshold*/ = 20.0) {
162 return detail::softplus_scalar(x, beta);
163}
164
165template <typename Derived>
166auto softplus(const Eigen::MatrixBase<Derived> &x, double beta = 1.0, double threshold = 20.0) {
167 using Scalar = typename Derived::Scalar;
168 return x.derived().unaryExpr(
169 [beta, threshold](const Scalar &v) { return janus::softplus(v, beta, threshold); });
170}
171
172// ======================================================================
173// Smooth Approximation Suite
174// ======================================================================
175
186template <typename T> auto smooth_abs(const T &x, double hardness = 1.0) {
187 detail::validate_hardness(hardness, "smooth_abs");
188 return -x + softplus(2.0 * x, hardness);
189}
190
202template <typename T1, typename T2>
203auto smooth_max(const T1 &a, const T2 &b, double hardness = 1.0) {
204 detail::validate_hardness(hardness, "smooth_max");
205 return b + softplus(a - b, hardness);
206}
207
219template <typename T1, typename T2>
220auto smooth_min(const T1 &a, const T2 &b, double hardness = 1.0) {
221 detail::validate_hardness(hardness, "smooth_min");
222 return a - softplus(a - b, hardness);
223}
224
236template <typename TX, typename TLow, typename THigh>
237auto smooth_clamp(const TX &x, const TLow &low, const THigh &high, double hardness = 1.0) {
238 detail::validate_hardness(hardness, "smooth_clamp");
239 return smooth_min(smooth_max(x, low, hardness), high, hardness);
240}
241
252template <JanusScalar T> auto ks_max(const std::vector<T> &values, double rho = 1.0) {
253 if (values.empty()) {
254 throw InvalidArgument("ks_max: requires at least one value");
255 }
257
258 if constexpr (std::is_floating_point_v<T>) {
259 const auto max_val = *std::max_element(values.begin(), values.end());
260 double sum_exp = 0.0;
261 for (const auto &value : values) {
262 sum_exp += std::exp(rho * (value - max_val));
263 }
264 return max_val + std::log(sum_exp) / rho;
265 } else {
266 return SymbolicScalar::logsumexp(rho * SymbolicScalar::vertcat(values)) / rho;
267 }
268}
269
270template <typename Derived>
271auto ks_max(const Eigen::MatrixBase<Derived> &values, double rho = 1.0) {
272 using Scalar = typename Derived::Scalar;
273 std::vector<Scalar> flattened;
274 flattened.reserve(static_cast<size_t>(values.size()));
275 for (Eigen::Index i = 0; i < values.size(); ++i) {
276 flattened.push_back(values.derived().coeff(i));
277 }
278 return ks_max(flattened, rho);
279}
280
281// ======================================================================
282// Sigmoid
283// ======================================================================
284
294
304template <typename T>
305auto sigmoid(const T &x, SigmoidType type = SigmoidType::Tanh, double norm_min = 0.0,
306 double norm_max = 1.0) {
307
308 // 1. Compute base sigmoid 's' in range [-1, 1] if possible, or standard form
309 // The reference implementation normalizes based on the theoretical raw range of the function.
310 // Tanh: range (-1, 1)
311 // Arctan: scaled 2/pi * atan(pi/2 * x) -> range (-1, 1)
312 // Polynomial: x / sqrt(1 + x^2) -> range (-1, 1)
313
314 auto s = x; // placeholder type initialization
315
316 switch (type) {
318 s = janus::tanh(x);
319 break;
321 s = janus::tanh(0.5 * x);
322 break;
324 s = (2.0 / M_PI) * janus::atan((M_PI / 2.0) * x);
325 break;
327 s = x / janus::sqrt(1.0 + x * x);
328 break;
329 }
330
331 // 2. Normalize from [-1, 1] to [norm_min, norm_max]
332 // s_normalized = s * (max - min) / 2 + (max + min) / 2
333 double scale = (norm_max - norm_min) / 2.0;
334 double offset = (norm_max + norm_min) / 2.0;
335
336 return s * scale + offset;
337}
338
339// ======================================================================
340// Swish
341// ======================================================================
342
350template <typename T> auto swish(const T &x, double beta = 1.0) {
351 return x / (1.0 + janus::exp(-beta * x));
352}
353
354// ======================================================================
355// Blend
356// ======================================================================
357
366template <typename TSwitch, typename THigh, typename TLow>
367auto blend(const TSwitch &switch_val, const THigh &val_high, const TLow &val_low) {
368 // Helper sigmoid (tanh type, 0 to 1 range)
369 auto weights = sigmoid(switch_val, SigmoidType::Tanh, 0.0, 1.0);
370
371 return val_high * weights + val_low * (1.0 - weights);
372}
373
374} // namespace janus
Scalar and element-wise arithmetic functions (abs, sqrt, pow, exp, log, etc.).
C++20 concepts constraining valid Janus scalar types.
Custom exception hierarchy for Janus framework.
Conditional selection, comparison, and logical operations.
Trigonometric and inverse trigonometric functions.
Input validation failed (e.g., mismatched sizes, invalid parameters).
Definition JanusError.hpp:31
void validate_rho(double rho)
Definition SurrogateModel.hpp:141
auto softplus_scalar(const T &x, double beta)
Definition SurrogateModel.hpp:147
void validate_hardness(double hardness, const char *function_name)
Definition SurrogateModel.hpp:135
Definition Diagnostics.hpp:19
auto sigmoid(const T &x, SigmoidType type=SigmoidType::Tanh, double norm_min=0.0, double norm_max=1.0)
Sigmoid function with normalization capability.
Definition SurrogateModel.hpp:305
T tanh(const T &x)
Computes hyperbolic tangent of x.
Definition Arithmetic.hpp:253
SigmoidType
Sigmoid shape selection.
Definition SurrogateModel.hpp:288
@ Polynomial
Polynomial: x / sqrt(1 + x^2).
Definition SurrogateModel.hpp:292
@ Arctan
Arc tangent based.
Definition SurrogateModel.hpp:291
@ Logistic
Logistic / standard sigmoid.
Definition SurrogateModel.hpp:290
@ Tanh
Hyperbolic tangent.
Definition SurrogateModel.hpp:289
T sqrt(const T &x)
Computes the square root of a scalar.
Definition Arithmetic.hpp:46
T atan(const T &x)
Computes arc tangent of x.
Definition Trig.hpp:146
T log(const T &x)
Computes the natural logarithm of x.
Definition Arithmetic.hpp:156
auto ks_max(const std::vector< T > &values, double rho=1.0)
Kreisselmeier-Steinhauser smooth maximum aggregator.
Definition SurrogateModel.hpp:252
auto smooth_clamp(const TX &x, const TLow &low, const THigh &high, double hardness=1.0)
Smooth approximation of clamp(x, low, high).
Definition SurrogateModel.hpp:237
auto softplus(const T &x, double beta=1.0, double=20.0)
Definition SurrogateModel.hpp:161
auto swish(const T &x, double beta=1.0)
Swish activation function: x / (1 + exp(-beta * x)).
Definition SurrogateModel.hpp:350
@ Polynomial
Definition AutoDiff.hpp:32
auto smooth_abs(const T &x, double hardness=1.0)
Smooth approximation of absolute value.
Definition SurrogateModel.hpp:186
auto blend(const TSwitch &switch_val, const THigh &val_high, const TLow &val_low)
Smoothly blends between two values based on a switch parameter.
Definition SurrogateModel.hpp:367
auto softmax(const std::vector< T > &args, double softness=1.0)
Computes the smooth maximum (LogSumExp) of a collection of values.
Definition SurrogateModel.hpp:38
auto smooth_max(const T1 &a, const T2 &b, double hardness=1.0)
Pairwise smooth maximum.
Definition SurrogateModel.hpp:203
auto smooth_min(const T1 &a, const T2 &b, double hardness=1.0)
Pairwise smooth minimum.
Definition SurrogateModel.hpp:220
auto softmin(const std::vector< T > &args, double softness=1.0)
Computes the smooth minimum of a collection of values. softmin(x) = -softmax(-x).
Definition SurrogateModel.hpp:108
casadi::MX SymbolicScalar
CasADi MX symbolic scalar.
Definition JanusTypes.hpp:70
auto max(const T1 &a, const T2 &b)
Computes maximum of two values.
Definition Logic.hpp:194
T exp(const T &x)
Computes the exponential function e^x.
Definition Arithmetic.hpp:131