diff --git a/src/main/java/org/spaceroots/mantissa/ode/AdaptiveStepsizeIntegrator.java b/src/main/java/org/spaceroots/mantissa/ode/AdaptiveStepsizeIntegrator.java index a903930b76..e06c139dc1 100644 --- a/src/main/java/org/spaceroots/mantissa/ode/AdaptiveStepsizeIntegrator.java +++ b/src/main/java/org/spaceroots/mantissa/ode/AdaptiveStepsizeIntegrator.java @@ -4,7 +4,7 @@ * Base class for integrators that adapt their step size while solving ordinary differential * equations. * - *
The class centralizes step bounds, tolerance bookkeeping and initialization so concrete + *
The class centralizes step bounds, tolerance bookkeeping, and initialization so concrete * algorithms can focus on method-specific interpolation and local error estimation. For each state * component {@code i}, the error threshold is {@code absTol_i + relTol_i * max(|y_m|, |y_{m+1}|)}; * scalar tolerances are broadcast when vector forms are not provided. A tentative step is accepted @@ -68,16 +68,16 @@ protected AdaptiveStepsizeIntegrator( /** * Build an integrator with per-component tolerances and step size bounds. * - *
Vector tolerances let callers adapt admissible error to the magnitude of each state - * coordinate. Arrays are used as provided; callers must ensure their length matches the problem - * dimension. The default step handler performs no action until replaced. + *
Vector tolerances let callers adapt admissible error to the scale of each state coordinate. + * Arrays are used as provided; callers must ensure their length matches the problem dimension. + * The default step handler performs no action until replaced. * * @param minStep minimal allowed step magnitude; positive even when integrating backward. * @param maxStep maximal allowed step magnitude; positive and not smaller than {@code minStep}. - * @param vecAbsoluteTolerance absolute error tolerance for each component; must align with state - * dimension and contain non-negative entries. - * @param vecRelativeTolerance relative error tolerance for each component; must align with state - * dimension and contain non-negative entries. + * @param vecAbsoluteTolerance absolute error tolerance for each component; must align with the + * state dimension and contain non-negative entries. + * @param vecRelativeTolerance relative error tolerance for each component; must align with the + * state dimension and contain non-negative entries. */ protected AdaptiveStepsizeIntegrator( double minStep, @@ -106,13 +106,12 @@ protected AdaptiveStepsizeIntegrator( * Set an explicit initial step size to be reused by the next integration run. * *
A positive value inside {@code [minStep, maxStep]} is honored directly by {@link - * #initializeStep(FirstOrderDifferentialEquations, boolean, int, double[], double, double[], - * double[], double[], double[])}. Negative or out-of-range values instruct the integrator to - * estimate an initial step from the derivatives instead. The stored value is not persisted across - * calls to {@link #resetInternalState()}. + * #initializeStep(StepInitializationContext)}. Negative or out-of-range values instruct the + * integrator to estimate an initial step from the derivatives instead. The stored value is not + * persisted across calls to {@link #resetInternalState()}. * - * @param initialStepSize desired starting step; must be positive and within configured bounds, or - * it is ignored in favor of automatic estimation. + * @param initialStepSize the desired starting step must be positive and within configured bounds, + * or it is ignored in favor of automatic estimation. */ @SuppressWarnings("unused") public void setInitialStepSize(double initialStepSize) { @@ -176,52 +175,27 @@ public void addSwitchingFunction( * step is derived from the ratio of scaled state and derivative norms, refined with a trial Euler * step, and finally clamped to the configured minimum and maximum step sizes. * - * @param equations system of first-order differential equations providing derivative evaluation; - * must not be {@code null}. - * @param forward {@code true} to integrate toward increasing time, {@code false} to integrate - * backward. - * @param order order of the integration method that will consume the step; influences the scaling - * of the heuristic. - * @param scale per-component scaling factors applied to the state vector; must match {@code y0} - * length and contain only non-zero values. - * @param t0 starting independent variable value associated with {@code y0}. - * @param y0 state vector at {@code t0}; not modified by this method. - * @param yDot0 first derivative of {@code y0}; computed by {@code equations} and reused in place. - * @param y1 workspace receiving a trial state after an Euler prediction; length must match {@code - * y0}. - * @param yDot1 workspace receiving the derivative at the trial state; length must match {@code - * y0}. + * @param context bundled initialization parameters for the step size heuristic. * @return signed initial step size bounded by configured limits and oriented according to {@code - * forward}. - * @throws DerivativeException if {@code equations} fails while evaluating derivatives at the - * trial point. + * context.forward()}. + * @throws DerivativeException if derivative evaluation fails while probing the trial point. */ - public double initializeStep( - FirstOrderDifferentialEquations equations, - boolean forward, - int order, - double[] scale, - double t0, - double[] y0, - double[] yDot0, - double[] y1, - double[] yDot1) - throws DerivativeException { + public double initializeStep(StepInitializationContext context) throws DerivativeException { if (initialStep > 0) { - // use the user provided value - return forward ? initialStep : -initialStep; + // use the user-provided value + return context.forward() ? 
initialStep : -initialStep; } - // very rough first guess : h = 0.01 * ||y/scale|| / ||y'/scale|| + // very rough first guess: h = 0.01 * ||y/scale|| / ||y'/scale|| // this guess will be used to perform an Euler step double ratio; double yOnScale2 = 0; double yDotOnScale2 = 0; - for (int j = 0; j < y0.length; ++j) { - ratio = y0[j] / scale[j]; + for (int j = 0; j < context.y0().length; ++j) { + ratio = context.y0()[j] / context.scale()[j]; yOnScale2 += ratio * ratio; - ratio = yDot0[j] / scale[j]; + ratio = context.yDot0()[j] / context.scale()[j]; yDotOnScale2 += ratio * ratio; } @@ -229,20 +203,20 @@ public double initializeStep( ((yOnScale2 < 1.0e-10) || (yDotOnScale2 < 1.0e-10)) ? 1.0e-6 : (0.01 * Math.sqrt(yOnScale2 / yDotOnScale2)); - if (!forward) { + if (!context.forward()) { h = -h; } // perform an Euler step using the preceding rough guess - for (int j = 0; j < y0.length; ++j) { - y1[j] = y0[j] + h * yDot0[j]; + for (int j = 0; j < context.y0().length; ++j) { + context.y1()[j] = context.y0()[j] + h * context.yDot0()[j]; } - equations.computeDerivatives(t0 + h, y1, yDot1); + context.equations().computeDerivatives(context.t0() + h, context.y1(), context.yDot1()); // estimate the second derivative of the solution double yDDotOnScale = 0; - for (int j = 0; j < y0.length; ++j) { - ratio = (yDot1[j] - yDot0[j]) / scale[j]; + for (int j = 0; j < context.y0().length; ++j) { + ratio = (context.yDot1()[j] - context.yDot0()[j]) / context.scale()[j]; yDDotOnScale += ratio * ratio; } yDDotOnScale = Math.sqrt(yDDotOnScale) / h; @@ -253,16 +227,16 @@ public double initializeStep( double h1 = (maxInv2 < 1.0e-15) ? Math.max(1.0e-6, 0.001 * Math.abs(h)) - : Math.pow(0.01 / maxInv2, 1.0 / order); + : Math.pow(0.01 / maxInv2, 1.0 / context.order()); h = Math.min(100.0 * Math.abs(h), h1); - h = Math.max(h, 1.0e-12 * Math.abs(t0)); // avoids cancellation when computing t1 - t0 + h = Math.max(h, 1.0e-12 * Math.abs(context.t0())); if (h < getMinStep()) { h = getMinStep(); } if (h > getMaxStep()) { h = getMaxStep(); } - if (!forward) { + if (!context.forward()) { h = -h; } @@ -387,8 +361,7 @@ public double getMaxStep() { /** * Optional user-supplied initial step size; a negative value signals that automatic estimation * should be used instead. The value is mutable between runs and is consumed by {@link - * #initializeStep(FirstOrderDifferentialEquations, boolean, int, double[], double, double[], - * double[], double[], double[])}. + * #initializeStep(StepInitializationContext)}. */ private double initialStep; @@ -401,7 +374,7 @@ public double getMaxStep() { /** * Relative error tolerance applied uniformly when vector tolerances are not provided. It scales - * the acceptable error proportionally to the magnitude of each state component during adaptive + * the acceptable error proportionally to the scale of each state component during adaptive * control. */ protected double scalRelativeTolerance; @@ -429,7 +402,7 @@ public double getMaxStep() { /** * Aggregates registered switching functions and coordinates event detection. It controls sampling - * frequency, root finding and state changes triggered by events encountered during the adaptive + * frequency, root finding, and state changes triggered by events encountered during the adaptive * integration process. 
*/ protected SwitchingFunctionsHandler switchesHandler; diff --git a/src/main/java/org/spaceroots/mantissa/ode/DormandPrince54Integrator.java b/src/main/java/org/spaceroots/mantissa/ode/DormandPrince54Integrator.java index c0ccee05b6..e9af0c7c86 100644 --- a/src/main/java/org/spaceroots/mantissa/ode/DormandPrince54Integrator.java +++ b/src/main/java/org/spaceroots/mantissa/ode/DormandPrince54Integrator.java @@ -71,11 +71,7 @@ public class DormandPrince54Integrator extends RungeKuttaFehlbergIntegrator { public DormandPrince54Integrator( double minStep, double maxStep, double scalAbsoluteTolerance, double scalRelativeTolerance) { super( - true, - C, - A, - B, - new DormandPrince54StepInterpolator(), + new RungeKuttaFehlbergMethod(true, C, A, B, new DormandPrince54StepInterpolator()), minStep, maxStep, scalAbsoluteTolerance, @@ -104,11 +100,7 @@ public DormandPrince54Integrator( double[] vecAbsoluteTolerance, double[] vecRelativeTolerance) { super( - true, - C, - A, - B, - new DormandPrince54StepInterpolator(), + new RungeKuttaFehlbergMethod(true, C, A, B, new DormandPrince54StepInterpolator()), minStep, maxStep, vecAbsoluteTolerance, @@ -145,11 +137,11 @@ public int getOrder() { * Computes the normalized root-mean-square local error estimate for a tentative step. * *
The implementation combines the Dormand–Prince error coefficients with the stage derivatives - * to form the embedded fourth-order estimate, scales each component by the greater magnitude of - * the start and end state, and divides by either scalar or per-component tolerances. The returned + * to form the embedded fourth-order estimate, scales each component by the greater scale of the + * start and end state, and divides by either scalar or per-component tolerances. The returned * value is compared against {@code 1.0}; values above one trigger a rejected step and step-size * reduction, while values below one permit acceptance. The calculation is deterministic and - * agnostic to integration direction. + * agnostic to the integration direction. * * @param yDotK derivatives for all Runge–Kutta stages; index {@code [k][j]} stores stage {@code * k} for component {@code j}. diff --git a/src/main/java/org/spaceroots/mantissa/ode/DormandPrince853Integrator.java b/src/main/java/org/spaceroots/mantissa/ode/DormandPrince853Integrator.java index 1623663d70..7bd0818a50 100644 --- a/src/main/java/org/spaceroots/mantissa/ode/DormandPrince853Integrator.java +++ b/src/main/java/org/spaceroots/mantissa/ode/DormandPrince853Integrator.java @@ -9,8 +9,8 @@ * the normalized local error below one. It automatically estimates an initial step, enforces * user-provided minimum and maximum magnitudes, and reuses the final derivative of each accepted * step (the first same as last property) to save one evaluation on the next step. Continuous - * output is available through a step interpolator so callers can sample intermediate states without - * retriggering derivative evaluations. + * output is available through a step interpolator, so callers can sample intermediate states + * without retriggering derivative evaluations. * *
Use this class when problems are non-stiff, require tight error control, or benefit from * higher-order dense output. Instances are mutable and not thread-safe; confine one integrator to a @@ -213,19 +213,15 @@ public class DormandPrince853Integrator extends RungeKuttaFehlbergIntegrator { * runs; values smaller than this may be used only for the final partial step. * @param maxStep positive upper bound for the absolute step size used during adaptation; must not * be smaller than {@code minStep} and is honored for both forward and backward directions. - * @param scalAbsoluteTolerance uniform absolute error threshold applied to each component when + * @param scalAbsoluteTolerance a uniform absolute error threshold applied to each component when * normalizing local error estimates; must be non-negative. - * @param scalRelativeTolerance uniform relative error threshold scaled by the largest magnitude - * of the start and end state values for each component; must be non-negative. + * @param scalRelativeTolerance a uniform relative error threshold scaled by the largest scale of + * the start and end state values for each component; must be non-negative. */ public DormandPrince853Integrator( double minStep, double maxStep, double scalAbsoluteTolerance, double scalRelativeTolerance) { super( - true, - C, - A, - B, - new DormandPrince853StepInterpolator(), + new RungeKuttaFehlbergMethod(true, C, A, B, new DormandPrince853StepInterpolator()), minStep, maxStep, scalAbsoluteTolerance, @@ -249,8 +245,7 @@ public DormandPrince853Integrator( * @param vecAbsoluteTolerance absolute error thresholds per state component used to scale local * truncation error estimates; array length must match the problem dimension. * @param vecRelativeTolerance relative error thresholds per state component multiplied by the - * larger magnitude of the start and end values; array length must match the problem - * dimension. + * larger scale of the start and end values; array length must match the problem dimension. */ public DormandPrince853Integrator( double minStep, @@ -258,11 +253,7 @@ public DormandPrince853Integrator( double[] vecAbsoluteTolerance, double[] vecRelativeTolerance) { super( - true, - C, - A, - B, - new DormandPrince853StepInterpolator(), + new RungeKuttaFehlbergMethod(true, C, A, B, new DormandPrince853StepInterpolator()), minStep, maxStep, vecAbsoluteTolerance, @@ -299,7 +290,7 @@ public int getOrder() { * *
The method combines two embedded error estimators (orders five and three) to produce a * single scalar error ratio. Each state component is scaled by either scalar or vector - * tolerances, using the larger magnitude of the start and end values to normalize derivatives. A + * tolerances, using the larger scale of the start and end values to normalize derivatives. A * return value above one signals that the step should be rejected and retried with a smaller * size; values at or below one allow the step to be accepted and possibly grown for the next * attempt. The computation does not modify the provided arrays. @@ -310,8 +301,8 @@ public int getOrder() { * dimension and is used for scaling tolerances. * @param y1 state estimate at the end of the candidate step produced by the high-order formula; * length matches {@code y0} and is used for scaling tolerances. - * @param h signed size of the trial step currently under evaluation; absolute value is used when - * computing the returned ratio. + * @param h the signed size of the trial step currently under evaluation; absolute value is used + * when computing the returned ratio. * @return normalized error ratio; values greater than one trigger step rejection and reduction, * while values at or below one allow acceptance. */ diff --git a/src/main/java/org/spaceroots/mantissa/ode/GraggBulirschStoerIntegrator.java b/src/main/java/org/spaceroots/mantissa/ode/GraggBulirschStoerIntegrator.java index 432ed3869d..ffd8d73cfb 100644 --- a/src/main/java/org/spaceroots/mantissa/ode/GraggBulirschStoerIntegrator.java +++ b/src/main/java/org/spaceroots/mantissa/ode/GraggBulirschStoerIntegrator.java @@ -1,5 +1,9 @@ package org.spaceroots.mantissa.ode; +import java.util.Arrays; +import java.util.Objects; +import org.jetbrains.annotations.NotNull; + /** * Gragg–Bulirsch–Stoer integrator for non-stiff Ordinary Differential Equations with dense output * support. @@ -74,9 +78,9 @@ public GraggBulirschStoerIntegrator( * *
Choose this constructor when each state component requires its own absolute and relative * tolerance. Vector tolerances must match the dimension returned by the supplied equations. All - * other defaults mirror the scalar constructor: stability checks enabled, conservative step-size - * control, and dense output if any listener requires it. The integrator copies no user buffers, - * so callers retain ownership of the tolerance arrays. + * other defaults mirror the scalar constructor: stability checks are enabled, conservative + * step-size control, and dense output if any listener requires it. The integrator copies no user + * buffers, so callers retain ownership of the tolerance arrays. * * @param minStep minimal step size in integration variable; must be strictly positive * @param maxStep maximal step size allowed; must be strictly positive @@ -101,7 +105,7 @@ public GraggBulirschStoerIntegrator( * Configure stability checks performed during early extrapolation iterations. * *
For each candidate step the integrator compares the first derivative with later midpoint - * derivatives to detect rapidly growing modes. If the check fails, the step is rejected and the + * derivatives to detect rapidly growing modes. If the check fails, the step is rejected, and the * trial step size is reduced by the supplied factor. Checks are limited to the first few * extrapolation iterations to avoid excessive cost. Passing negative or zero limits restores the * defaults ({@code maxIter=2}, {@code maxChecks=1}, {@code stabilityReduction=0.5}). This setting @@ -137,7 +141,7 @@ public void setStabilityCheck( * hNew = h * stepControl2 / Math.pow(err / stepControl1, 1.0 / (2 * k + 1)) * } * - *
where {@code err} is the normalized error estimate and {@code k} is the extrapolation column + *
Where {@code err} is the normalized error estimate and {@code k} is the extrapolation column * (starting at zero). The result is further clamped by * *
{@code
@@ -298,7 +302,7 @@ private void initializeArrays() {
}
// initialize the order selection cost array
- // (number of function calls for each column of the extrapolation table)
+ // (number of derivative evaluations for each column of the extrapolation table)
costPerStep[0] = sequence[0] + 1;
for (int k = 1; k < size; ++k) {
costPerStep[k] = costPerStep[k - 1] + sequence[k];
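The Javadoc quoted above gives the order-k step-size update `hNew = h * stepControl2 / Math.pow(err / stepControl1, 1.0 / (2 * k + 1))`. As a reading aid, the sketch below writes that law out as plain Java; the class name, the control-factor values, and the clamping bounds are assumptions for illustration, not values taken from this patch.

```java
// Illustrative only: the order-k step-size update quoted in the javadoc above.
// Control factors and bounds are assumed values, not the library defaults.
final class StepControlSketch {

  static double nextStep(
      double h, double err, int k,
      double stepControl1, double stepControl2,
      double minStep, double maxStep) {
    // hNew = h * stepControl2 / (err / stepControl1)^(1 / (2k + 1))
    double hNew = h * stepControl2 / Math.pow(err / stepControl1, 1.0 / (2 * k + 1));
    // clamp the magnitude to the configured bounds, keeping the sign of the current step
    double magnitude = Math.min(Math.max(Math.abs(hNew), minStep), maxStep);
    return Math.copySign(magnitude, h);
  }

  public static void main(String[] args) {
    // err > 1 means the tolerance was exceeded, so the step shrinks
    System.out.println(nextStep(0.1, 4.0, 2, 0.65, 0.94, 1.0e-8, 1.0));
  }
}
```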
@@ -353,11 +357,11 @@ public String getName() {
*
* Each entry combines absolute and relative tolerances with the larger magnitude among the two
* provided state vectors, mirroring the scaling used by Hairer and Wanner. When vector tolerances
- * are configured the corresponding element-wise values are used; otherwise scalar tolerances are
+ * are configured, the corresponding element-wise values are used; otherwise scalar tolerances are
* applied uniformly.
*
- * @param y1 first state vector sampled for magnitude; must match the problem dimension
- * @param y2 second state vector sampled for magnitude; must match the problem dimension
+ * @param y1 the first state vector sampled for magnitude; must match the problem dimension
+ * @param y2 the second state vector sampled for magnitude; must match the problem dimension
* @param scale output buffer receiving per-component scales; length must match {@code y1}
*/
private void rescale(double[] y1, double[] y2, double[] scale) {
@@ -382,33 +386,24 @@ private void rescale(double[] y1, double[] y2, double[] scale) {
* last substep is symmetrically corrected to keep the scheme centered. Intermediate derivatives
* and states are written into the provided buffers to avoid allocations.
*
- * @param equations differential equations being integrated; must match the configured dimension
- * @param t0 start time for the global step
- * @param y0 state vector at {@code t0}; not modified by this method
- * @param step global step length attempted for this iteration
- * @param k extrapolation column index (0-based) selecting the number of substeps
- * @param scale per-component scaling factors for error and stability checks
- * @param f scratch array holding derivatives per substep; {@code f[0]} must already contain the
- * derivative at {@code t0}
- * @param yMiddle buffer receiving the state at mid-step when needed for dense output
- * @param yEnd buffer receiving the state at the end of the trial step
- * @param yTmp scratch buffer for intermediate midpoint updates
+ * @param context bundle carrying the equations, step configuration, and scratch buffers used by
+ * the modified midpoint sequence
* @return {@code true} when the full sequence completed; {@code false} if stability checks failed
* and the caller should reduce the step
* @throws DerivativeException if the user-provided derivative function throws during evaluation
*/
- private boolean tryStep(
- FirstOrderDifferentialEquations equations,
- double t0,
- double[] y0,
- double step,
- int k,
- double[] scale,
- double[][] f,
- double[] yMiddle,
- double[] yEnd,
- double[] yTmp)
- throws DerivativeException {
+ private boolean tryStep(ModifiedMidpointContext context) throws DerivativeException {
+
+ FirstOrderDifferentialEquations equations = context.equations();
+ double t0 = context.t0();
+ double[] y0 = context.y0();
+ double step = context.step();
+ int k = context.k();
+ double[] scale = context.scale();
+ double[][] f = context.f();
+ double[] yMiddle = context.yMiddle();
+ double[] yEnd = context.yEnd();
+ double[] yTmp = context.yTmp();
int n = sequence[k];
double subStep = step / n;
@@ -537,37 +532,31 @@ public void integrate(
handler.reset();
costPerTimeUnit[0] = 0;
- runIntegrationLoop(equations, t, y, forward, scale, interpolator, workingState, status);
+ IntegrationContext integrationContext =
+ new IntegrationContext(equations, t, y, forward, scale, interpolator, workingState, status);
+ runIntegrationLoop(integrationContext);
}
- private void runIntegrationLoop(
- FirstOrderDifferentialEquations equations,
- double targetTime,
- double[] y,
- boolean forward,
- double[] scale,
- AbstractStepInterpolator interpolator,
- WorkingState workingState,
- StepStatus status)
+ private void runIntegrationLoop(IntegrationContext context)
throws DerivativeException, IntegratorException {
- while (!status.lastStep) {
- performSingleStep(
- equations, targetTime, y, forward, scale, interpolator, workingState, status);
+ while (!context.status().lastStep) {
+ performSingleStep(context);
}
}
- private void performSingleStep(
- FirstOrderDifferentialEquations equations,
- double targetTime,
- double[] y,
- boolean forward,
- double[] scale,
- AbstractStepInterpolator interpolator,
- WorkingState w,
- StepStatus status)
+ private void performSingleStep(IntegrationContext context)
throws DerivativeException, IntegratorException {
+ FirstOrderDifferentialEquations equations = context.equations();
+ double targetTime = context.targetTime();
+ double[] y = context.y();
+ boolean forward = context.forward();
+ double[] scale = context.scale();
+ AbstractStepInterpolator interpolator = context.interpolator();
+ WorkingState w = context.workingState();
+ StepStatus status = context.status();
+
status.reject = false;
status.loopExit = false;
prepareNewStep(equations, y, forward, scale, interpolator, w, status);
@@ -595,6 +584,158 @@ private void performSingleStep(
updateRejectionStatus(status, status.reject);
}
+ private record IntegrationContext(
+ FirstOrderDifferentialEquations equations,
+ double targetTime,
+ double[] y,
+ boolean forward,
+ double[] scale,
+ AbstractStepInterpolator interpolator,
+ WorkingState workingState,
+ StepStatus status) {
+
+ @Override
+ public boolean equals(Object other) {
+ if (this == other) {
+ return true;
+ }
+ if (!(other
+ instanceof
+ IntegrationContext(
+ FirstOrderDifferentialEquations otherEquations,
+ double otherTargetTime,
+ double[] otherY,
+ boolean otherForward,
+ double[] otherScale,
+ AbstractStepInterpolator otherInterpolator,
+ WorkingState otherWorkingState,
+ StepStatus otherStatus))) {
+ return false;
+ }
+ return otherForward == forward
+ && Double.doubleToLongBits(otherTargetTime) == Double.doubleToLongBits(targetTime)
+ && Objects.equals(otherEquations, equations)
+ && Arrays.equals(otherY, y)
+ && Arrays.equals(otherScale, scale)
+ && Objects.equals(otherInterpolator, interpolator)
+ && Objects.equals(otherWorkingState, workingState)
+ && Objects.equals(otherStatus, status);
+ }
+
+ @Override
+ public int hashCode() {
+ int result = Objects.hash(equations, targetTime, forward, interpolator, workingState, status);
+ result = 31 * result + Arrays.hashCode(y);
+ result = 31 * result + Arrays.hashCode(scale);
+ return result;
+ }
+
+ @Override
+ public @NotNull String toString() {
+ return "IntegrationContext["
+ + "equations="
+ + equations
+ + ", targetTime="
+ + targetTime
+ + ", y="
+ + Arrays.toString(y)
+ + ", forward="
+ + forward
+ + ", scale="
+ + Arrays.toString(scale)
+ + ", interpolator="
+ + interpolator
+ + ", workingState="
+ + workingState
+ + ", status="
+ + status
+ + "]";
+ }
+ }
+
+ private record ModifiedMidpointContext(
+ FirstOrderDifferentialEquations equations,
+ double t0,
+ double[] y0,
+ double step,
+ int k,
+ double[] scale,
+ double[][] f,
+ double[] yMiddle,
+ double[] yEnd,
+ double[] yTmp) {
+
+ @Override
+ public boolean equals(Object other) {
+ if (this == other) {
+ return true;
+ }
+ if (!(other
+ instanceof
+ ModifiedMidpointContext(
+ FirstOrderDifferentialEquations otherEquations,
+ double otherT0,
+ double[] otherY0,
+ double otherStep,
+ int otherK,
+ double[] otherScale,
+ double[][] otherF,
+ double[] otherYMiddle,
+ double[] otherYEnd,
+ double[] otherYTmp))) {
+ return false;
+ }
+ return otherK == k
+ && Double.doubleToLongBits(otherT0) == Double.doubleToLongBits(t0)
+ && Double.doubleToLongBits(otherStep) == Double.doubleToLongBits(step)
+ && Objects.equals(otherEquations, equations)
+ && Arrays.equals(otherY0, y0)
+ && Arrays.equals(otherScale, scale)
+ && Arrays.deepEquals(otherF, f)
+ && Arrays.equals(otherYMiddle, yMiddle)
+ && Arrays.equals(otherYEnd, yEnd)
+ && Arrays.equals(otherYTmp, yTmp);
+ }
+
+ @Override
+ public int hashCode() {
+ int result = Objects.hash(equations, t0, step, k);
+ result = 31 * result + Arrays.hashCode(y0);
+ result = 31 * result + Arrays.hashCode(scale);
+ result = 31 * result + Arrays.deepHashCode(f);
+ result = 31 * result + Arrays.hashCode(yMiddle);
+ result = 31 * result + Arrays.hashCode(yEnd);
+ result = 31 * result + Arrays.hashCode(yTmp);
+ return result;
+ }
+
+ @Override
+ public @NotNull String toString() {
+ return "ModifiedMidpointContext["
+ + "equations="
+ + equations
+ + ", t0="
+ + t0
+ + ", y0="
+ + Arrays.toString(y0)
+ + ", step="
+ + step
+ + ", k="
+ + k
+ + ", scale="
+ + Arrays.toString(scale)
+ + ", f="
+ + Arrays.deepToString(f)
+ + ", yMiddle="
+ + Arrays.toString(yMiddle)
+ + ", yEnd="
+ + Arrays.toString(yEnd)
+ + ", yTmp="
+ + Arrays.toString(yTmp)
+ + "]";
+ }
+ }
+
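Both context records above override `equals`, `hashCode`, and `toString` because the compiler-generated versions compare array components by reference. The minimal, self-contained example below (a hypothetical record, not part of this patch) shows the behavior that motivates those overrides.

```java
import java.util.Arrays;

// Hypothetical record used only to demonstrate why the context records override equals():
// the generated implementation compares array components by reference, so two records
// built from equal-valued arrays are not equal unless Arrays.equals(...) is used instead.
record ArrayPair(double t, double[] y) {
  public static void main(String[] args) {
    ArrayPair a = new ArrayPair(1.0, new double[] {1.0, 2.0});
    ArrayPair b = new ArrayPair(1.0, new double[] {1.0, 2.0});
    System.out.println(a.equals(b)); // false: generated equals uses reference identity for y
    System.out.println(Arrays.equals(a.y(), b.y())); // true: element-wise comparison
  }
}
```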
/**
* Store results for an accepted step, notify handlers, and prepare the next trial size/order.
*
@@ -663,15 +804,16 @@ private void prepareNewStep(
if (status.firstTime) {
status.hNew =
initializeStep(
- equations,
- forward,
- 2 * status.targetIter + 1,
- scale,
- stepStart,
- y,
- w.yDot0,
- w.yTmp,
- w.yTmpDot);
+ new StepInitializationContext(
+ equations,
+ forward,
+ 2 * status.targetIter + 1,
+ scale,
+ stepStart,
+ y,
+ w.yDot0,
+ w.yTmp,
+ w.yTmpDot));
if (!forward) {
status.hNew = -status.hNew;
}
@@ -726,16 +868,17 @@ private boolean tryCurrentSubStep(
throws DerivativeException, IntegratorException {
if (tryStep(
- equations,
- stepStart,
- y,
- stepSize,
- k,
- scale,
- w.fk[k],
- (k == 0) ? w.yMidDots[0] : w.diagonal[k - 1],
- (k == 0) ? w.y1 : w.y1Diag[k - 1],
- w.yTmp)) {
+ new ModifiedMidpointContext(
+ equations,
+ stepStart,
+ y,
+ stepSize,
+ k,
+ scale,
+ w.fk[k],
+ (k == 0) ? w.yMidDots[0] : w.diagonal[k - 1],
+ (k == 0) ? w.y1 : w.y1Diag[k - 1],
+ w.yTmp))) {
return true;
}
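For readers who want to see the vector-tolerance constructor described earlier in this file in action, here is a hedged usage sketch integrating y' = -y. The `integrate(equations, t0, y0, t, y)` signature and the exact methods of `FirstOrderDifferentialEquations` are assumed from the surrounding code rather than spelled out in this patch, and the tolerances are arbitrary.

```java
import org.spaceroots.mantissa.ode.DerivativeException;
import org.spaceroots.mantissa.ode.FirstOrderDifferentialEquations;
import org.spaceroots.mantissa.ode.GraggBulirschStoerIntegrator;
import org.spaceroots.mantissa.ode.IntegratorException;

// Hedged usage sketch: per-component tolerances on a one-dimensional decay problem.
// The integrate(...) signature is assumed, not defined in this patch.
class DecaySketch {
  public static void main(String[] args) throws DerivativeException, IntegratorException {
    FirstOrderDifferentialEquations decay =
        new FirstOrderDifferentialEquations() {
          public int getDimension() {
            return 1;
          }

          public void computeDerivatives(double t, double[] y, double[] yDot) {
            yDot[0] = -y[0]; // y' = -y, exact solution exp(-t)
          }
        };
    double[] absTol = {1.0e-10}; // one entry per state component
    double[] relTol = {1.0e-10};
    GraggBulirschStoerIntegrator integrator =
        new GraggBulirschStoerIntegrator(1.0e-8, 1.0, absTol, relTol);
    double[] y = new double[1];
    integrator.integrate(decay, 0.0, new double[] {1.0}, 1.0, y);
    System.out.println(y[0]); // should be close to Math.exp(-1.0)
  }
}
```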
diff --git a/src/main/java/org/spaceroots/mantissa/ode/HighamHall54Integrator.java b/src/main/java/org/spaceroots/mantissa/ode/HighamHall54Integrator.java
index 65cee62478..6dbab4847c 100644
--- a/src/main/java/org/spaceroots/mantissa/ode/HighamHall54Integrator.java
+++ b/src/main/java/org/spaceroots/mantissa/ode/HighamHall54Integrator.java
@@ -58,9 +58,9 @@ public class HighamHall54Integrator extends RungeKuttaFehlbergIntegrator {
* ceiling, but the final step may be shorter to land exactly on the target time. All parameters
* must be strictly positive to allow both forward and backward integration.
*
- * @param minStep smallest step the controller will attempt before potentially failing, in the
+ * @param minStep the smallest step the controller will attempt before potentially failing, in the
* integration time units; must be greater than zero.
- * @param maxStep largest step the controller may take; must exceed {@code minStep} and be
+ * @param maxStep the largest step the controller may take; must exceed {@code minStep} and be
* positive even when integrating backward in time.
* @param scalAbsoluteTolerance uniform absolute error tolerance applied to every state component;
* must be non-negative but typically small compared to expected state magnitudes.
@@ -71,11 +71,7 @@ public class HighamHall54Integrator extends RungeKuttaFehlbergIntegrator {
public HighamHall54Integrator(
double minStep, double maxStep, double scalAbsoluteTolerance, double scalRelativeTolerance) {
super(
- false,
- c,
- a,
- b,
- new HighamHall54StepInterpolator(),
+ new RungeKuttaFehlbergMethod(false, c, a, b, new HighamHall54StepInterpolator()),
minStep,
maxStep,
scalAbsoluteTolerance,
@@ -90,10 +86,10 @@ public HighamHall54Integrator(
* scalar constructor: they constrain the adaptive controller but do not guarantee fixed step
* lengths. A final step may still be shortened to land exactly at the integration endpoint.
*
- * @param minStep smallest admissible step in time units; must be positive for both forward and
- * backward integration scenarios.
+ * @param minStep the smallest admissible step in time units; must be positive for both forward
+ * and backward integration scenarios.
* @param maxStep largest admissible step; must be positive and not smaller than {@code minStep}.
- * @param vecAbsoluteTolerance element-wise absolute tolerances; each entry must be non-negative
+ * @param vecAbsoluteTolerance element-wise absolute tolerances; each entry must be non-negative,
* and the array length must equal the equations dimension.
* @param vecRelativeTolerance element-wise relative tolerances; each entry must be non-negative
* and combined with the absolute tolerances for scaling.
@@ -105,11 +101,7 @@ public HighamHall54Integrator(
double[] vecAbsoluteTolerance,
double[] vecRelativeTolerance) {
super(
- false,
- c,
- a,
- b,
- new HighamHall54StepInterpolator(),
+ new RungeKuttaFehlbergMethod(false, c, a, b, new HighamHall54StepInterpolator()),
minStep,
maxStep,
vecAbsoluteTolerance,
@@ -156,7 +148,7 @@ public int getOrder() {
* @param y1 estimate of the state at the end of the step before error correction; must align with
* {@code y0} in length and ordering.
* @param h current step size in integration time units; may be positive or negative depending on
- * integration direction.
+ * the integration direction.
* @return dimensionless error ratio; values above one suggest rejecting or shrinking the step
*/
protected double estimateError(double[][] yDotK, double[] y0, double[] y1, double h) {
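The `estimateError` contract documented above (for Higham-Hall as well as the Dormand-Prince variants) reduces to a normalized root-mean-square ratio: each component error is divided by `absTol + relTol * max(|y0_j|, |y1_j|)` and the result is compared against 1.0. The sketch below is not the library's error coefficients, only the scaling rule described in the Javadoc, with hypothetical names.

```java
// Illustrative scaling only: shows how a per-component error is normalized against
// absTol + relTol * max(|y0_j|, |y1_j|) and reduced to a single accept/reject ratio.
final class ErrorRatioSketch {

  static double errorRatio(
      double[] componentError, double[] y0, double[] y1, double absTol, double relTol) {
    double sum = 0;
    for (int j = 0; j < componentError.length; ++j) {
      double tol = absTol + relTol * Math.max(Math.abs(y0[j]), Math.abs(y1[j]));
      double ratio = componentError[j] / tol;
      sum += ratio * ratio;
    }
    // greater than 1.0 means the tentative step should be rejected and shrunk
    return Math.sqrt(sum / componentError.length);
  }

  public static void main(String[] args) {
    double[] err = {1.0e-9, 2.0e-9};
    double[] y0 = {1.0, -2.0};
    double[] y1 = {1.1, -1.9};
    System.out.println(errorRatio(err, y0, y1, 1.0e-8, 1.0e-8));
  }
}
```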
diff --git a/src/main/java/org/spaceroots/mantissa/ode/RungeKuttaFehlbergIntegrator.java b/src/main/java/org/spaceroots/mantissa/ode/RungeKuttaFehlbergIntegrator.java
index 66f9b8ddf2..7272dbdc6a 100644
--- a/src/main/java/org/spaceroots/mantissa/ode/RungeKuttaFehlbergIntegrator.java
+++ b/src/main/java/org/spaceroots/mantissa/ode/RungeKuttaFehlbergIntegrator.java
@@ -1,6 +1,8 @@
package org.spaceroots.mantissa.ode;
import java.util.Arrays;
+import java.util.Objects;
+import org.jetbrains.annotations.NotNull;
/**
* This class implements the common part of all Runge-Kutta-Fehlberg integrators for Ordinary
@@ -20,14 +22,14 @@
* | b'1 b'2 ... b's-1 b's
*
*
- * In fact, we rather use the array defined by ej = bj - b'j to compute directly the error rather + *
In fact, we rather use the array defined by ej = bj - b'j to directly compute the error rather * than computing two estimates and then comparing them. * *
Some methods are qualified as fsal (first same as last) methods. This means the last * evaluation of the derivatives in one step is the same as the first in the next step. Then, this - * evaluation can be reused from one step to the next one and the cost of such a method is really - * s-1 evaluations despite the method still has s stages. This behaviour is true only for successful - * steps, if the step is rejected after the error estimation phase, no evaluation is saved. For an + * evaluation can be reused from one step to the next one, and the cost of such a method is really + * s-1 evaluations despite the method still has s stages. This behavior is true only for successful + * steps, if the step is rejected after the error estimation phase, no evaluation is saved. For a * fsal method, we have cs = 1 and asi = bi for all i. * * @version $Id: RungeKuttaFehlbergIntegrator.java 1719 2007-09-26 19:46:57Z luc $ @@ -38,11 +40,7 @@ public abstract class RungeKuttaFehlbergIntegrator extends AdaptiveStepsizeInteg /** * Build a Runge-Kutta integrator with the given Butcher array. * - * @param fsal indicate that the method is an fsal - * @param c time steps from Butcher array (without the first zero) - * @param a internal weights from Butcher array (without the first empty row) - * @param b external weights for the high order method from Butcher array - * @param prototype prototype of the step interpolator to use + * @param method Butcher tableau and interpolator prototype * @param minStep minimal step (must be positive even for backward integration), the last step can * be smaller than this * @param maxStep maximal step (must be positive even for backward integration) @@ -50,11 +48,7 @@ public abstract class RungeKuttaFehlbergIntegrator extends AdaptiveStepsizeInteg * @param scalRelativeTolerance allowed relative error */ protected RungeKuttaFehlbergIntegrator( - boolean fsal, - double[] c, - double[][] a, - double[] b, - RungeKuttaStepInterpolator prototype, + RungeKuttaFehlbergMethod method, double minStep, double maxStep, double scalAbsoluteTolerance, @@ -62,11 +56,11 @@ protected RungeKuttaFehlbergIntegrator( super(minStep, maxStep, scalAbsoluteTolerance, scalRelativeTolerance); - this.fsal = fsal; - this.c = c; - this.a = a; - this.b = b; - this.prototype = prototype; + this.fsal = method.fsal(); + this.c = method.c(); + this.a = method.a(); + this.b = method.b(); + this.prototype = method.prototype(); exp = -1.0 / getOrder(); @@ -80,11 +74,7 @@ protected RungeKuttaFehlbergIntegrator( /** * Build a Runge-Kutta integrator with the given Butcher array. 
* - * @param fsal indicate that the method is an fsal - * @param c time steps from Butcher array (without the first zero) - * @param a internal weights from Butcher array (without the first empty row) - * @param b external weights for the high order method from Butcher array - * @param prototype prototype of the step interpolator to use + * @param method Butcher tableau and interpolator prototype * @param minStep minimal step (must be positive even for backward integration), the last step can * be smaller than this * @param maxStep maximal step (must be positive even for backward integration) @@ -92,11 +82,7 @@ protected RungeKuttaFehlbergIntegrator( * @param vecRelativeTolerance allowed relative error */ protected RungeKuttaFehlbergIntegrator( - boolean fsal, - double[] c, - double[][] a, - double[] b, - RungeKuttaStepInterpolator prototype, + RungeKuttaFehlbergMethod method, double minStep, double maxStep, double[] vecAbsoluteTolerance, @@ -104,11 +90,11 @@ protected RungeKuttaFehlbergIntegrator( super(minStep, maxStep, vecAbsoluteTolerance, vecRelativeTolerance); - this.fsal = fsal; - this.c = c; - this.a = a; - this.b = b; - this.prototype = prototype; + this.fsal = method.fsal(); + this.c = method.c(); + this.a = method.a(); + this.b = method.b(); + this.prototype = method.prototype(); exp = -1.0 / getOrder(); @@ -215,7 +201,7 @@ public void setMaxGrowth(double maxGrowth) { * @param y0 estimate of the step at the start of the step * @param y1 estimate of the step at the end of the step * @param h current step - * @return error ratio, greater than 1 if step should be rejected + * @return error ratio, greater than 1 if the step should be rejected */ protected abstract double estimateError(double[][] yDotK, double[] y0, double[] y1, double h); @@ -270,7 +256,8 @@ private boolean performStep( accepted = evaluateStepAcceptance(interpolator, error, context); } - updateAfterAcceptance(equations, t, y, forward, stages, context, error, interpolator); + updateAfterAcceptance( + new StepAcceptanceContext(equations, t, y, forward, stages, context, interpolator), error); return computeLastStepFlag(t, forward); } @@ -294,15 +281,16 @@ private void initializeStepIfFirstTime( double[] scale = (vecAbsoluteTolerance != null) ? 
vecAbsoluteTolerance : buildScalarScale(y); context.hNew = initializeStep( - equations, - forward, - getOrder(), - scale, - stepStart, - y, - context.yDotK[0], - context.yTmp, - context.yDotK[1]); + new StepInitializationContext( + equations, + forward, + getOrder(), + scale, + stepStart, + y, + context.yDotK[0], + context.yTmp, + context.yDotK[1])); context.firstTime = false; } @@ -365,38 +353,38 @@ private boolean evaluateStepAcceptance( return true; } - private void updateAfterAcceptance( - FirstOrderDifferentialEquations equations, - double t, - double[] y, - boolean forward, - int stages, - StepContext context, - double error, - AbstractStepInterpolator interpolator) + private void updateAfterAcceptance(StepAcceptanceContext acceptance, double error) throws DerivativeException, IntegratorException { stepStart += stepSize; - System.arraycopy(context.yTmp, 0, y, 0, y.length); - switchesHandler.stepAccepted(stepStart, y); - boolean lastStep = computeLastStepFlag(t, forward); - interpolator.storeTime(stepStart); - handler.handleStep((StepInterpolator) interpolator, lastStep); + System.arraycopy(acceptance.context().yTmp, 0, acceptance.y(), 0, acceptance.y().length); + switchesHandler.stepAccepted(stepStart, acceptance.y()); + boolean lastStep = computeLastStepFlag(acceptance.t(), acceptance.forward()); + acceptance.interpolator().storeTime(stepStart); + handler.handleStep((StepInterpolator) acceptance.interpolator(), lastStep); if (fsal) { - System.arraycopy(context.yDotK[stages - 1], 0, context.yDotK[0], 0, y.length); + System.arraycopy( + acceptance.context().yDotK[acceptance.stages() - 1], + 0, + acceptance.context().yDotK[0], + 0, + acceptance.y().length); } - if (switchesHandler.reset(stepStart, y) && !lastStep) { - equations.computeDerivatives(stepStart, y, context.yDotK[0]); + if (switchesHandler.reset(stepStart, acceptance.y()) && !lastStep) { + acceptance + .equations() + .computeDerivatives(stepStart, acceptance.y(), acceptance.context().yDotK[0]); } if (!lastStep) { double factor = Math.clamp(safety * Math.pow(error, exp), minReduction, maxGrowth); double scaledH = stepSize * factor; double nextT = stepStart + scaledH; - boolean nextIsLast = forward ? (nextT >= t) : (nextT <= t); - context.hNew = filterStep(scaledH, nextIsLast); + boolean nextIsLast = + acceptance.forward() ? 
(nextT >= acceptance.t()) : (nextT <= acceptance.t()); + acceptance.context().hNew = filterStep(scaledH, nextIsLast); } } @@ -423,16 +411,79 @@ private StepContext(int stages, int dimension) { } } + private record StepAcceptanceContext( + FirstOrderDifferentialEquations equations, + double t, + double[] y, + boolean forward, + int stages, + StepContext context, + AbstractStepInterpolator interpolator) { + + @Override + public boolean equals(Object other) { + if (this == other) { + return true; + } + if (!(other + instanceof + StepAcceptanceContext( + FirstOrderDifferentialEquations otherEquations, + double otherT, + double[] otherY, + boolean otherForward, + int otherStages, + StepContext otherContext, + AbstractStepInterpolator otherInterpolator))) { + return false; + } + return otherForward == forward + && otherStages == stages + && Double.doubleToLongBits(otherT) == Double.doubleToLongBits(t) + && Objects.equals(otherEquations, equations) + && Objects.equals(otherContext, context) + && Objects.equals(otherInterpolator, interpolator) + && Arrays.equals(otherY, y); + } + + @Override + public int hashCode() { + int result = Objects.hash(equations, forward, stages, t, context, interpolator); + result = 31 * result + Arrays.hashCode(y); + return result; + } + + @Override + public @NotNull String toString() { + return "StepAcceptanceContext[" + + "equations=" + + equations + + ", t=" + + t + + ", y=" + + Arrays.toString(y) + + ", forward=" + + forward + + ", stages=" + + stages + + ", context=" + + context + + ", interpolator=" + + interpolator + + "]"; + } + } + /** Indicator for fsal methods. */ private final boolean fsal; - /** Time steps from Butcher array (without the first zero). */ + /** Time steps from the Butcher array (without the first zero). */ private final double[] c; - /** Internal weights from Butcher array (without the first empty row). */ + /** Internal weights from the Butcher array (without the first empty row). */ private final double[][] a; - /** External weights for the high order method from Butcher array. */ + /** External weights for the high-order method from the Butcher array. */ private final double[] b; /** Prototype of the step interpolator. */ diff --git a/src/main/java/org/spaceroots/mantissa/ode/RungeKuttaFehlbergMethod.java b/src/main/java/org/spaceroots/mantissa/ode/RungeKuttaFehlbergMethod.java new file mode 100644 index 0000000000..030ff544e5 --- /dev/null +++ b/src/main/java/org/spaceroots/mantissa/ode/RungeKuttaFehlbergMethod.java @@ -0,0 +1,96 @@ +package org.spaceroots.mantissa.ode; + +import java.util.Arrays; +import java.util.Objects; +import org.jetbrains.annotations.NotNull; + +/** + * Bundles the Butcher tableau and interpolation prototype for a Fehlberg integrator. + * + *
The record groups the immutable method definition shared across Runge-Kutta-Fehlberg + * constructors: the FSAL flag, tableau coefficients, and the step interpolator prototype that is + * cloned during integration. Instances are simple data carriers; they do not copy the coefficient + * arrays and therefore rely on the caller to treat those arrays as immutable. + * + * @param fsal whether the method has the first-same-as-last property + * @param c time steps from the Butcher tableau (excluding the first zero) + * @param a internal weights from the Butcher tableau (excluding the first empty row) + * @param b external weights for the high-order method + * @param prototype prototype interpolator used for dense output + */ +public record RungeKuttaFehlbergMethod( + boolean fsal, double[] c, double[][] a, double[] b, RungeKuttaStepInterpolator prototype) { + + /** + * Compare this method definition with another for structural equality. + * + *
The comparison treats the coefficient arrays as ordered value sequences rather than identity + * references, ensuring two methods with identical tableau contents are equal even when backed by + * distinct arrays. + * + * @param other candidate object to compare + * @return {@code true} if all components are equal by value + */ + @Override + public boolean equals(Object other) { + if (this == other) { + return true; + } + if (!(other + instanceof + RungeKuttaFehlbergMethod( + boolean otherFsal, + double[] otherC, + double[][] otherA, + double[] otherB, + RungeKuttaStepInterpolator otherPrototype))) { + return false; + } + return otherFsal == fsal + && Objects.equals(otherPrototype, prototype) + && Arrays.equals(otherC, c) + && Arrays.deepEquals(otherA, a) + && Arrays.equals(otherB, b); + } + + /** + * Compute a hash code consistent with the array-aware equality contract. + * + *
The hash is composed of the scalar fields and the contents of the coefficient arrays so that + * equal method definitions share the same hash even if their arrays are distinct instances. + * + * @return hash code derived from record components and array contents + */ + @Override + public int hashCode() { + int result = Objects.hash(fsal, prototype); + result = 31 * result + Arrays.hashCode(c); + result = 31 * result + Arrays.deepHashCode(a); + result = 31 * result + Arrays.hashCode(b); + return result; + } + + /** + * Render a descriptive string that includes the contents of each coefficient array. + * + *
The output mirrors the record component order and formats arrays with {@link + * Arrays#toString(double[])} and {@link Arrays#deepToString(Object[])} for easy inspection. + * + * @return human-readable representation containing array contents + */ + @Override + public @NotNull String toString() { + return "RungeKuttaFehlbergMethod[" + + "fsal=" + + fsal + + ", c=" + + Arrays.toString(c) + + ", a=" + + Arrays.deepToString(a) + + ", b=" + + Arrays.toString(b) + + ", prototype=" + + prototype + + "]"; + } +} diff --git a/src/main/java/org/spaceroots/mantissa/ode/StepInitializationContext.java b/src/main/java/org/spaceroots/mantissa/ode/StepInitializationContext.java new file mode 100644 index 0000000000..66d7daddba --- /dev/null +++ b/src/main/java/org/spaceroots/mantissa/ode/StepInitializationContext.java @@ -0,0 +1,143 @@ +package org.spaceroots.mantissa.ode; + +import java.util.Arrays; +import java.util.Objects; +import org.jetbrains.annotations.NotNull; + +/** + * Bundles inputs and work arrays used to estimate an initial step size. + * + *
This record packages the values required by {@link AdaptiveStepsizeIntegrator#initializeStep} + * so callers can pass a single object instead of a long parameter list. It is typically created by + * adaptive integrators right before the first step is chosen, using arrays that already exist in + * the integration workspace. The record itself is immutable, but it stores references to mutable + * arrays owned by the caller; no defensive copies are made, and the arrays may be reused across + * attempts. Treat instances as short-lived, single-thread use objects that describe one + * initialization attempt and provide access to the shared scratch buffers. + * + *
Responsibilities include: + * + *
The comparison uses the record components as the semantic fields but treats the array + * components as ordered value sequences rather than reference identities. The time value is + * compared using raw bit equality to keep {@code NaN} payloads distinct. This method is + * deterministic and side-effect free, which makes it suitable for caching keys or assertions in + * tests. + * + * @param other candidate object to compare; may be null or any type. + * @return {@code true} when all scalar fields match and each array contains equal values. + */ + @Override + public boolean equals(Object other) { + if (this == other) { + return true; + } + if (!(other + instanceof + StepInitializationContext( + FirstOrderDifferentialEquations otherEquations, + boolean otherForward, + int otherOrder, + double[] otherScale, + double otherT0, + double[] otherY0, + double[] otherYDot0, + double[] otherY1, + double[] otherYDot1))) { + return false; + } + return forward == otherForward + && order == otherOrder + && Double.doubleToLongBits(t0) == Double.doubleToLongBits(otherT0) + && Objects.equals(equations, otherEquations) + && Arrays.equals(scale, otherScale) + && Arrays.equals(y0, otherY0) + && Arrays.equals(yDot0, otherYDot0) + && Arrays.equals(y1, otherY1) + && Arrays.equals(yDot1, otherYDot1); + } + + /** + * Compute a hash code consistent with the array-aware equality contract. + * + *
The hash is composed of the scalar components and the contents of each array. This keeps the + * result stable for identical value sequences even when the arrays are distinct instances. + * Callers should remember that changing any referenced array will change the hash, so this record + * should not be used as a key in mutable array scenarios unless the contents are stable. + * + * @return hash value derived from scalar components and array contents. + */ + @Override + public int hashCode() { + int result = Objects.hash(equations, forward, order, t0); + result = 31 * result + Arrays.hashCode(scale); + result = 31 * result + Arrays.hashCode(y0); + result = 31 * result + Arrays.hashCode(yDot0); + result = 31 * result + Arrays.hashCode(y1); + result = 31 * result + Arrays.hashCode(yDot1); + return result; + } + + /** + * Render a descriptive string that includes the contents of each array component. + * + *
The output mirrors the record component order, formatting arrays with {@link + * Arrays#toString(double[])} so the contents are visible for diagnostics. The returned string is + * non-null and intended for logging or debugging rather than parsing. + * + * @return human-readable representation containing scalar values and array contents. + */ + @Override + public @NotNull String toString() { + return "StepInitializationContext[" + + "equations=" + + equations + + ", forward=" + + forward + + ", order=" + + order + + ", scale=" + + Arrays.toString(scale) + + ", t0=" + + t0 + + ", y0=" + + Arrays.toString(y0) + + ", yDot0=" + + Arrays.toString(yDot0) + + ", y1=" + + Arrays.toString(y1) + + ", yDot1=" + + Arrays.toString(yDot1) + + "]"; + } +}
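To close, a hedged sketch of the new parameter object in use: it drives the refactored `initializeStep(StepInitializationContext)` entry point by hand, with the same component order the call sites in this patch use. The integrator normally builds the record internally; the sketch assumes it compiles in the same package, that `FirstOrderDifferentialEquations` exposes `getDimension()` and `computeDerivatives(...)` as used elsewhere in the patch, and the problem, tolerances, and bounds are arbitrary.

```java
// Hedged sketch, assumed to live in org.spaceroots.mantissa.ode so the types resolve directly.
// It only demonstrates how the nine former initializeStep parameters are bundled.
package org.spaceroots.mantissa.ode;

class InitializeStepSketch {
  public static void main(String[] args) throws DerivativeException {
    FirstOrderDifferentialEquations decay =
        new FirstOrderDifferentialEquations() {
          public int getDimension() {
            return 1;
          }

          public void computeDerivatives(double t, double[] y, double[] yDot) {
            yDot[0] = -y[0];
          }
        };
    double[] y0 = {1.0};
    double[] yDot0 = new double[1];
    decay.computeDerivatives(0.0, y0, yDot0); // derivative at the start point
    DormandPrince853Integrator integrator =
        new DormandPrince853Integrator(1.0e-10, 1.0, 1.0e-8, 1.0e-8);
    double h =
        integrator.initializeStep(
            new StepInitializationContext(
                decay,
                true, // integrate toward increasing time
                integrator.getOrder(),
                new double[] {1.0e-8}, // per-component scale
                0.0, // t0
                y0,
                yDot0,
                new double[1], // y1 scratch buffer
                new double[1])); // yDot1 scratch buffer
    System.out.println("suggested first step: " + h);
  }
}
```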