How to get multipliers after solving a quadratic program in ojAlgo - java

I am implementing a Sequential Quadratic Programming (SQP) optimizer and use ojAlgo for the quadratic programming (QP) subproblem.
My question is: how do I get hold of the Lagrange multipliers for the QP solution?
In the attached example code, which solves a QP, result.getMultipliers() only returns an empty Optional.
package com.mycompany.testojalgo;

import java.math.BigDecimal;
import java.util.ArrayList;
import java.util.List;
import java.util.Optional;
import org.ojalgo.matrix.Primitive64Matrix;
import org.ojalgo.optimisation.Expression;
import org.ojalgo.optimisation.ExpressionsBasedModel;
import org.ojalgo.optimisation.Optimisation;
import org.ojalgo.optimisation.Variable;
import org.ojalgo.structure.Access1D;
import org.ojalgo.type.StandardType;
import org.ojalgo.type.context.NumberContext;

public class ojAlgoQP {

    public static void main(String[] args) {
        testOjAlgoQuadraticProgramming();
    }

    public static void testOjAlgoQuadraticProgramming() {
        // QP Example 16.2 p453 in 'Numerical Optimization', 2ed, (2006), Jorge Nocedal and Stephen J. Wright.
        // minimize F(x1,x2,x3) = 3*x1*x1 + 2*x1*x2 + x1*x3 + 2.5*x2*x2 + 2*x2*x3 + 2*x3*x3 - 8*x1 - 3*x2 - 3*x3
        // x = [x1, x2, x3]'
        // F(x) = 1/2*x'*H*x + x'*g
        // constraints: x1 + x3 = 3, x2 + x3 = 0
        // A*x = b

        // objective gradient
        Primitive64Matrix g = Primitive64Matrix.FACTORY.rows(new double[][]{
                {-8}, {-3}, {-3}
        });
        // objective Hessian
        Primitive64Matrix H = Primitive64Matrix.FACTORY.rows(new double[][]{
                {6, 2, 1},
                {2, 5, 2},
                {1, 2, 4}
        });

        Variable x1 = new Variable("x1");
        Variable x2 = new Variable("x2");
        Variable x3 = new Variable("x3");

        // constraint equations
        Primitive64Matrix A = Primitive64Matrix.FACTORY.rows(new double[][]{
                {1, 0, 1},
                {0, 1, 1}
        });
        // required constraint values
        Primitive64Matrix b = Primitive64Matrix.FACTORY.rows(new double[][]{
                {3}, {0}
        });

        List<Variable> variables = new ArrayList<>();
        variables.add(x1);
        variables.add(x2);
        variables.add(x3);

        ExpressionsBasedModel model = new ExpressionsBasedModel(variables);

        Expression energy = model.addExpression("Energy");
        energy.setLinearFactors(variables, g);
        // divide by two to express the function using the Hessian
        energy.setQuadraticFactors(variables, H.divide(2));
        energy.weight(BigDecimal.ONE);

        // create constraint equations
        for (int i = 0; i < A.countRows(); i++) {
            Expression expression = model.addExpression("Constraint#" + i);
            for (int j = 0; j < A.countColumns(); j++) {
                expression.set(variables.get(j), A.get(i, j));
            }
            expression.level(b.get(i));
        }

        Optimisation.Result result = model.minimise();

        NumberContext accuracy = StandardType.PERCENT.withPrecision(1);
        boolean ok = model.validate(result, accuracy);
        Optimisation.State v = result.getState();

        // How do I get the multipliers?
        Optional<Access1D<?>> multipliers = result.getMultipliers();

        double value1 = result.getValue();

        // Get the result and check the value and the constraint residual
        Primitive64Matrix x = Primitive64Matrix.FACTORY.rows(new double[][]{
                {x1.getValue().doubleValue()}, {x2.getValue().doubleValue()}, {x3.getValue().doubleValue()}
        });
        // divide by two to express the function using the Hessian, again
        Primitive64Matrix value = x.transpose().multiply(H.divide(2)).multiply(x).add(x.transpose().multiply(g));
        Primitive64Matrix residual = A.multiply(x).subtract(b);
    }
}
Update 1:
Here is my reworked example using org.ojalgo.optimisation.convex.ConvexSolver.getBuilder():
package com.mycompany.testojalgo;

import java.util.Optional;
import org.ojalgo.matrix.store.MatrixStore;
import org.ojalgo.matrix.store.Primitive64Store;
import org.ojalgo.optimisation.Optimisation;
import org.ojalgo.optimisation.convex.ConvexSolver;
import org.ojalgo.structure.Access1D;

public class ojAlgoQP {

    public static void main(String[] args) {
        testOjAlgoQuadraticProgramming2();
    }

    public static void testOjAlgoQuadraticProgramming2() {
        // QP Example 16.2 p453 in 'Numerical Optimization', 2ed, (2006), Jorge Nocedal and Stephen J. Wright.
        // minimize F(x1,x2,x3) = 3*x1*x1 + 2*x1*x2 + x1*x3 + 2.5*x2*x2 + 2*x2*x3 + 2*x3*x3 - 8*x1 - 3*x2 - 3*x3
        // x = [x1, x2, x3]'
        // F(x) = 1/2*x'*H*x + x'*g
        // constraints: x1 + x3 = 3, x2 + x3 = 0
        // A*x = b

        // objective gradient
        Primitive64Store gStore = Primitive64Store.FACTORY.rows(new double[][]{
                {-8}, {-3}, {-3}
        });
        // objective Hessian
        Primitive64Store HStore = Primitive64Store.FACTORY.rows(new double[][]{
                {6, 2, 1},
                {2, 5, 2},
                {1, 2, 4}
        });
        // constraint equations
        Primitive64Store AStore = Primitive64Store.FACTORY.rows(new double[][]{
                {1, 0, 1},
                {0, 1, 1}
        });
        // required constraint values
        Primitive64Store bStore = Primitive64Store.FACTORY.rows(new double[][]{
                {3}, {0}
        });

        ConvexSolver.Builder builder = ConvexSolver.getBuilder();
        builder.equalities(AStore, bStore);
        builder.objective(HStore, gStore.negate());
        ConvexSolver solver = builder.build();
        Optimisation.Result result = solver.solve();

        // How do I get the multipliers? multipliers = Optional.empty
        Optional<Access1D<?>> multipliers = result.getMultipliers();

        // value1 = -3.5
        double value1 = result.getValue();

        // Verify result:
        // x = [2.0, -0.9999999999999996, 0.9999999999999997]'
        // value = -3.5
        // residual = [-4.440892098500626E-16, 1.1102230246251565E-16]'
        Primitive64Store x = Primitive64Store.FACTORY.column(result.toRawCopy1D());
        MatrixStore<Double> value = x.transpose().multiply(HStore.multiply(0.5)).multiply(x).add(x.transpose().multiply(gStore));
        MatrixStore<Double> residual = AStore.multiply(x).subtract(bStore);
    }
}
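Since getMultipliers() comes back empty here, the equality multipliers can also be recovered by hand from the KKT stationarity condition H*x + g = A'*lambda, which gives the small system (A*A')*lambda = A*(H*x + g). A minimal sketch to append at the end of testOjAlgoQuadraticProgramming2 (my own addition, using only calls already present above; for Example 16.2 it should yield lambda = (3, -2)):

        // Recover the equality multipliers from H*x + g = A'*lambda by
        // solving the (here 2x2) system (A*A')*lambda = A*(H*x + g).
        MatrixStore<Double> rhs = AStore.multiply(HStore.multiply(x).add(gStore)); // A*(H*x + g)
        MatrixStore<Double> AAt = AStore.multiply(AStore.transpose());             // A*A'
        double a11 = AAt.doubleValue(0, 0), a12 = AAt.doubleValue(0, 1);
        double a21 = AAt.doubleValue(1, 0), a22 = AAt.doubleValue(1, 1);
        double det = a11 * a22 - a12 * a21; // assumes A has full row rank
        double lambda1 = (a22 * rhs.doubleValue(0) - a12 * rhs.doubleValue(1)) / det; // expect 3
        double lambda2 = (a11 * rhs.doubleValue(1) - a21 * rhs.doubleValue(0)) / det; // expect -2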

I believe it is an Optional because it was (sometimes) too messy to map the Lagrange multipliers from the solver back to the constraints of the model.
If you're implementing an SQP solver, may I suggest that you don't implement it in terms of ExpressionsBasedModel, but delegate to the convex solvers directly. Build something that implements org.ojalgo.optimisation.Optimisation.Solver and delegate to the various classes in the org.ojalgo.optimisation.convex package. Then you work more directly with the matrices, vectors and multipliers.
To make that solver usable by ExpressionsBasedModel you also implement an org.ojalgo.optimisation.Optimisation.Integration and register it by calling ExpressionsBasedModel.addPreferredSolver(myIntegration) or ExpressionsBasedModel.addFallbackSolver(myIntegration).
Implementing a solver and making it usable from the modelling tool are two separate things.
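A rough skeleton of that separation, as a sketch only (the solve(kickStarter) signature is my recollection of the Optimisation.Solver interface, so verify it against the ojAlgo version you use):

import org.ojalgo.optimisation.Optimisation;

public final class MySqpSolver implements Optimisation.Solver {

    @Override
    public Optimisation.Result solve(Optimisation.Result kickStarter) {
        // At each SQP iteration: assemble the QP subproblem matrices, delegate to
        // org.ojalgo.optimisation.convex.ConvexSolver, and read the iterate and the
        // multipliers directly from the matrices maintained here.
        throw new UnsupportedOperationException("sketch only");
    }
}

The Integration wrapper then only needs to translate between the model's variables/constraints and these matrices, and is what you register via addPreferredSolver or addFallbackSolver.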

Related

Using Apache math for linear regression with weights

I've been using Apache math for a while to do a multiple linear regression using OLSMultipleLinearRegression. Now I need to extend my solution to include a weighting factor for each data point.
I'm trying to replicate the MATLAB function fitlm.
I have a MATLAB call like:
table_data = table(points_scored, height, weight, age);
model = fitlm(table_data, 'points_scored ~ height + weight + age - 1', 'Weights', data_weights)
From 'model' I get the regression coefficients for height, weight, age.
In Java the code I have now is (roughly):
double[][] variables = new double[points_scored.length][3];
// Fill in variables for height, weight, age
...
OLSMultipleLinearRegression regression = new OLSMultipleLinearRegression();
regression.setNoIntercept(true);
regression.newSampleData(points_scored, variables);
There does not appear to be a way to add weightings to OLSMultipleLinearRegression. There does appear to be a way to add weights to the LeastSquaresBuilder. However I'm having trouble figuring out exactly how to use this. My biggest problem (I think) is creating the jacobians that are expected.
Here is most of what I tried:
double[] points_scored = // fill in points scored
double[] height = // fill in
double[] weight = // fill in
double[] age = // fill in

MultivariateJacobianFunction distToResidual = coeffs -> {
    RealVector value = new ArrayRealVector(points_scored.length);
    RealMatrix jacobian = new Array2DRowRealMatrix(points_scored.length, 3);
    for (int i = 0; i < points_scored.length; ++i) {
        double residual = points_scored[i];
        residual -= coeffs.getEntry(0) * height[i];
        residual -= coeffs.getEntry(1) * weight[i];
        residual -= coeffs.getEntry(2) * age[i];
        value.setEntry(i, residual);
        // No idea how to set up the jacobian here
    }
    return new Pair<RealVector, RealMatrix>(value, jacobian);
};

double[] prescribedDistancesToLine = new double[points_scored.length];
Arrays.fill(prescribedDistancesToLine, 0);
double[] starts = new double[] {1, 1, 1};

LeastSquaresProblem problem = new LeastSquaresBuilder()
        .start(starts)
        .model(distToResidual)
        .target(prescribedDistancesToLine)
        .lazyEvaluation(false)
        .maxEvaluations(1000)
        .maxIterations(1000)
        .build();
LeastSquaresOptimizer.Optimum optimum = new LevenbergMarquardtOptimizer().optimize(problem);
Since I don't know how to compute the Jacobian values I've just been stabbing in the dark and getting coefficients nowhere near the MATLAB answers. Once I get this part working, I know that adding the weights should be a pretty straightforward extra line in the LeastSquaresBuilder.
Thanks for any help in advance!
You can use class GLSMultipleLinearRegression from Apache math.
For example, let's find a linear regression for three plane data points
(0, 0), (1, 2), (2, 0) with weights 1, 2, 1:
import org.apache.commons.math3.stat.regression.GLSMultipleLinearRegression;

public class Main {

    public static void main(String[] args) {
        GLSMultipleLinearRegression regr = new GLSMultipleLinearRegression();
        regr.setNoIntercept(false);
        double[] y = new double[]{0.0, 2.0, 0.0};
        double[][] x = new double[3][];
        x[0] = new double[]{0.0};
        x[1] = new double[]{1.0};
        x[2] = new double[]{2.0};
        double[][] omega = new double[3][];
        omega[0] = new double[]{1.0, 0.0, 0.0};
        omega[1] = new double[]{0.0, 0.5, 0.0};
        omega[2] = new double[]{0.0, 0.0, 1.0};
        regr.newSampleData(y, x, omega);
        double[] params = regr.estimateRegressionParameters();
        System.out.println("Slope: " + params[1] + ", intercept: " + params[0]);
    }
}
Note that the omega matrix is diagonal, and its diagonal elements are reciprocal weights.
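For arbitrary per-point weights the same pattern applies; a small sketch:

// Build omega from per-point weights: a diagonal matrix of reciprocal weights.
double[] weights = {1.0, 2.0, 1.0};
double[][] omega = new double[weights.length][weights.length];
for (int i = 0; i < weights.length; i++) {
    omega[i][i] = 1.0 / weights[i]; // off-diagonal entries stay 0.0
}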
See the documentation for the multi-variable case.
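As an aside on the question's original LeastSquaresBuilder attempt: for a linear model the residual is r_i = points_scored[i] - (c0*height[i] + c1*weight[i] + c2*age[i]), so row i of the Jacobian is just the negated regressors. A sketch of the missing loop body, using the same names as in the question:

// d(residual_i)/d(coeff_j) is minus the j-th regressor at point i.
jacobian.setEntry(i, 0, -height[i]);
jacobian.setEntry(i, 1, -weight[i]);
jacobian.setEntry(i, 2, -age[i]);

Per-point weights should then be a matter of adding weight(new DiagonalMatrix(data_weights)) to the LeastSquaresBuilder chain (my reading of the commons-math3 API; verify against the javadoc).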

Choco Solver setObjective maximize polynomial equation

I'm currently trying out Choco Solver (4.0.8) and I'm trying to solve these equations:
Maximize 8*x1 + 11*x2 + 6*x3 + 4*x4
subject to 5*x1 + 7*x2 + 4*x3 + 3*x4 <= 14, with x1..x4 binary.
I'm stuck on maximising the first equation. I guess I just need a hint on which subtype of Variable EQUATION should be.
Model model = new Model("my first problem");
BoolVar x1 = model.boolVar("x1");
BoolVar x2 = model.boolVar("x2");
BoolVar x3 = model.boolVar("x3");
BoolVar x4 = model.boolVar("x4");
BoolVar[] bools = {x1, x2, x3, x4};
int[] c = {5, 7, 4, 3};
int[] c2 = {8, 11, 6, 4};
Variable EQUATION = new Variable();
model.scalar(bools, c, "<=", 14).post(); // 5x1 + 7x2 + 4x3 + 3x4 ≤ 14
model.setObjective(Model.MAXIMIZE, EQUATION); // 8x1 + 11x2 + 6x3 + 4x4
model.getSolver().solve();
System.out.println(x1);
System.out.println(x2);
System.out.println(x3);
System.out.println(x4);
It seems I have found a solution like this:
Variable EQUATION = new ScaleView(x1, 8)
        .add(new ScaleView(x2, 11),
             new ScaleView(x3, 6),
             new ScaleView(x4, 4)).intVar();
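An alternative formulation, as a sketch (assuming the Choco 4.x scalar and objective API): introduce an IntVar for the objective value, tie it to the expression with a scalar constraint, and maximise that variable:

// Hypothetical alternative: bind the objective expression to an IntVar "obj".
IntVar obj = model.intVar("obj", 0, 8 + 11 + 6 + 4); // upper bound = sum of coefficients
model.scalar(bools, c2, "=", obj).post();            // 8x1 + 11x2 + 6x3 + 4x4 = obj
model.setObjective(Model.MAXIMIZE, obj);
while (model.getSolver().solve()) {
    // each call improves on the previous solution; the last one found is optimal
}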

apache.commons.math3 - how to use linear programming?

commons-math (ver. 2.2) had an LP solver.
Here I found the following example code:
import java.util.ArrayList;
import java.util.Collection;
import org.apache.commons.math.optimization.GoalType;
import org.apache.commons.math.optimization.OptimizationException;
import org.apache.commons.math.optimization.RealPointValuePair;
import org.apache.commons.math.optimization.linear.LinearConstraint;
import org.apache.commons.math.optimization.linear.LinearObjectiveFunction;
import org.apache.commons.math.optimization.linear.Relationship;
import org.apache.commons.math.optimization.linear.SimplexSolver;

@SuppressWarnings("deprecation")
public class Main {

    @SuppressWarnings({"rawtypes", "unchecked"})
    public static void main(String[] args) {
        // describe the optimization problem
        LinearObjectiveFunction f = new LinearObjectiveFunction(new double[]{3, 5}, 0);
        Collection constraints = new ArrayList();
        constraints.add(new LinearConstraint(new double[]{2, 8}, Relationship.LEQ, 13));
        constraints.add(new LinearConstraint(new double[]{5, -1}, Relationship.LEQ, 11));
        constraints.add(new LinearConstraint(new double[]{1, 0}, Relationship.GEQ, 0));
        constraints.add(new LinearConstraint(new double[]{0, 1}, Relationship.GEQ, 0));

        // create and run the solver
        RealPointValuePair solution = null;
        try {
            solution = new SimplexSolver().optimize(f, constraints, GoalType.MAXIMIZE, false);
        } catch (OptimizationException e) {
            e.printStackTrace();
        }

        if (solution != null) {
            // get the solution
            double max = solution.getValue();
            System.out.println("Opt: " + max);
            // print decision variables
            for (int i = 0; i < 2; i++) {
                System.out.print(solution.getPoint()[i] + "\t");
            }
        }
    }
}
However, when I add the Maven dependency for the latest math version (3.6.1),
I see that most of the related classes are deprecated, and I haven't found any code examples for the updated versions.
I would be glad to use 3.6.1 for my LP problems - can someone assist here please?
The example from the link uses commons-math version 2.
The main package changed from org.apache.commons.math in version 2 to org.apache.commons.math3 in version 3.
The classes used in the example were in the org.apache.commons.math.optimization package; in this concrete case the corresponding package in the new version is org.apache.commons.math3.optim.
The sample code, ported from version 2 to version 3, looks like:
package commons.math;

import java.util.ArrayList;
import java.util.Collection;
import org.apache.commons.math3.optim.PointValuePair;
import org.apache.commons.math3.optim.linear.LinearConstraint;
import org.apache.commons.math3.optim.linear.LinearConstraintSet;
import org.apache.commons.math3.optim.linear.LinearObjectiveFunction;
import org.apache.commons.math3.optim.linear.Relationship;
import org.apache.commons.math3.optim.linear.SimplexSolver;
import org.apache.commons.math3.optim.nonlinear.scalar.GoalType;

public class MathTest {

    public static void main(String[] args) {
        // describe the optimization problem
        LinearObjectiveFunction f = new LinearObjectiveFunction(new double[]{3, 5}, 0);
        Collection<LinearConstraint> constraints = new ArrayList<LinearConstraint>();
        constraints.add(new LinearConstraint(new double[]{2, 8}, Relationship.LEQ, 13));
        constraints.add(new LinearConstraint(new double[]{5, -1}, Relationship.LEQ, 11));
        constraints.add(new LinearConstraint(new double[]{1, 0}, Relationship.GEQ, 0));
        constraints.add(new LinearConstraint(new double[]{0, 1}, Relationship.GEQ, 0));

        // create and run the solver
        PointValuePair solution = new SimplexSolver().optimize(f, new LinearConstraintSet(constraints), GoalType.MAXIMIZE);

        if (solution != null) {
            // get the solution
            double max = solution.getValue();
            System.out.println("Opt: " + max);
            // print decision variables
            for (int i = 0; i < 2; i++) {
                System.out.print(solution.getPoint()[i] + "\t");
            }
        }
    }
}
Hope it helps,
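A small variation on the ported code (a sketch against commons-math3 3.6.x; check the optim.linear javadoc): the two explicit x >= 0 rows can be replaced by the NonNegativeConstraint optimization data:

// Assumes org.apache.commons.math3.optim.linear.NonNegativeConstraint is available.
PointValuePair solution = new SimplexSolver().optimize(
        f, new LinearConstraintSet(constraints),
        GoalType.MAXIMIZE, new NonNegativeConstraint(true));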

Fail to use ANN_MLP in Opencv 3 in Java

I tried using ANN_MLP in OpenCV 3.0 to train a model for a simple XOR operation (i.e. 00->0, 01->1, 10->1, 11->0). But it returned NaN when I called nn.predict. What's wrong with the code? Here is my Java code:
package jm.app;

import org.opencv.core.Core;
import org.opencv.core.CvType;
import org.opencv.core.Mat;
import org.opencv.core.TermCriteria;
import org.opencv.ml.ANN_MLP;
import org.opencv.ml.Ml;
import org.opencv.ml.StatModel;

public class Main {

    static { System.loadLibrary(Core.NATIVE_LIBRARY_NAME); }

    public static void main(String[] args) {
        Mat trainData = new Mat(4, 2, CvType.CV_32FC1);
        trainData.put(0, 0, 0);
        trainData.put(0, 1, 0);
        trainData.put(1, 0, 0);
        trainData.put(1, 1, 1);
        trainData.put(2, 0, 1);
        trainData.put(2, 1, 0);
        trainData.put(3, 0, 1);
        trainData.put(3, 1, 1);

        Mat trainLabels = new Mat(4, 1, CvType.CV_32FC1);
        trainLabels.put(0, 0, 0);
        trainLabels.put(1, 0, 1);
        trainLabels.put(2, 0, 1);
        trainLabels.put(3, 0, 0);

        ANN_MLP nn = ANN_MLP.create();
        nn.setActivationFunction(ANN_MLP.SIGMOID_SYM);
        nn.setTrainMethod(ANN_MLP.BACKPROP);
        nn.setBackpropMomentumScale(0.1);
        nn.setBackpropWeightScale(0.1);
        nn.setTermCriteria(new TermCriteria(TermCriteria.MAX_ITER, 100000, 0.000001));

        Mat layers = new Mat(1, 3, CvType.CV_32SC1);
        layers.put(0, 0, 2);
        layers.put(0, 1, 3);
        layers.put(0, 2, 1);
        nn.setLayerSizes(layers);

        nn.train(trainData, Ml.ROW_SAMPLE, trainLabels);

        Mat testData = new Mat(1, 2, CvType.CV_32FC1);
        testData.put(0, 0, 1);
        testData.put(0, 1, 1);
        Mat testLabels = new Mat(1, 1, CvType.CV_32FC1);
        float res = nn.predict(testData, testLabels, ANN_MLP.RAW_OUTPUT);
        Util.printMat(testLabels);

        Mat layer1 = nn.getWeights(0);
        Mat layer2 = nn.getWeights(1);
        Mat layer3 = nn.getWeights(2);
        Util.printMat(layer1);
        Util.printMat(layer2);
        Util.printMat(layer3);
    }
}

package jm.app;

import org.opencv.core.Mat;

public class Util {

    public static void printMat(Mat mat) {
        System.out.println();
        System.out.print(mat.rows() + " * " + mat.cols());
        for (int i = 0; i < mat.rows(); i++) {
            System.out.println();
            for (int j = 0; j < mat.cols(); j++) {
                System.out.print(mat.get(i, j)[0] + " ");
            }
        }
        System.out.println();
    }
}
And the output is:
1 * 1
NaN
1 * 4
2.0 -1.0 2.0 -1.0
3 * 3
-0.417962425638773 -0.11805564491195578 0.7527567170648859
0.40930192249590086 -0.24876980957807385 -0.2929439299929529
0.6025307693048867 0.2936134607392147 -0.10605986687856579
4 * 1
0.5558049015443158
0.4766362469511742
0.3713056187114578
-0.24058588929784652
So I have two questions:
1. Why did "testLabels" get a NaN value?
2. Why is "layer1" a 1*4 matrix? What does "layer1" do here?

Extrapolation in java

I've been able to use Apache Math's interpolation using the LinearInterpolator().interpolate(x1, y1). Unfortunately, I could not find a way to extrapolate.
How can I do linear extrapolation in java?
x1 = [1, 2, 3, 4, 5];
y1 = [2, 4, 8, 16, 32];
I would like to know the values of any x2 not just the one in the range of the x1.
If I try to evaluate at 6 I get an OutOfRangeException ("if {@code v} is outside of the domain of the spline function (smaller than the smallest knot point or larger than the largest knot point)").
Edit: Here is my simple interpolate function. I would like an option to enable extrapolation, just like MATLAB's interp2. Using the x1 and y1 arrays as input for that function, I get Apache's OutOfRangeException because the value 6 is not contained in the x1 array.
public static List<Double> interpolateLinear(double[] x1, double[] y1, Double[] x2) {
    List<Double> resultList;
    final PolynomialSplineFunction function = new LinearInterpolator().interpolate(x1, y1);
    resultList = Arrays.stream(x2).map(aDouble -> function.value(aDouble)).collect(Collectors.toList());
    return resultList;
}
Edit 2: I had to read a little about the .value method of the PolynomialSplineFunction object to get it right, but there it goes (all the credit goes to user Joni). Thanks man:
public static double[] interpolateLinear(double[] x1, double[] y1, double[] x2) {
    final PolynomialSplineFunction function = new LinearInterpolator().interpolate(x1, y1);
    final PolynomialFunction[] splines = function.getPolynomials();
    final PolynomialFunction firstFunction = splines[0];
    final PolynomialFunction lastFunction = splines[splines.length - 1];
    final double[] knots = function.getKnots();
    final double firstKnot = knots[0];
    final double lastKnot = knots[knots.length - 1];
    double[] resultList = Arrays.stream(x2).map(aDouble -> {
        if (aDouble > lastKnot) {
            // each spline is expressed in coordinates local to its left knot
            return lastFunction.value(aDouble - knots[knots.length - 2]);
        } else if (aDouble < firstKnot) {
            return firstFunction.value(aDouble - knots[0]);
        }
        return function.value(aDouble);
    }).toArray();
    return resultList;
}
You can get the first and last polynomial splines from the interpolator, and use those to extrapolate.
PolynomialSplineFunction function = new LinearInterpolator().interpolate(x1, y1);
PolynomialFunction[] splines = function.getPolynomials();
PolynomialFunction first = splines[0];
PolynomialFunction last = splines[splines.length-1];
// use first and last to extrapolate
You won't get 64 from 6 though: the last segment runs from (4, 16) to (5, 32), so its slope is 16 and linear extrapolation gives 32 + 16 = 48 at x = 6. Which goes to show that extrapolation is bound to give you wrong answers.
I had a similar problem: the interpolation part is a cubic spline function, and math3.analysis.polynomials.PolynomialSplineFunction does not support extrapolation.
In the end, I decided to write a linear extrapolation based on the two leftmost (or rightmost) points, i.e. (x1, y1) and (x2, y2). I need the extrapolation part to keep the function from failing or producing very irregular values in the extrapolation region. In my example, I hard-coded a bound so that the extrapolated value stays within [0.5*y1, 2*y1] (left side) or [0.5*yn, 2*yn] (right side).
As mentioned by Joni, extrapolation is dangerous and can lead to unexpected results. Be careful. The linear extrapolation can be replaced by any other kind of extrapolation, depending on how you write the code (e.g. using the derivative at the right/left point and inferring a quadratic function for extrapolation).
public static double getValue(PolynomialSplineFunction interpolationFunction, double v) {
    try {
        return interpolationFunction.value(v);
    } catch (OutOfRangeException e) {
        // extrapolation: linear, based on the slope through the two outermost points
        double[] knots = interpolationFunction.getKnots();
        int n = knots.length;
        double first, second, firstValue, secondValue;
        if (v < knots[0]) {
            // extrapolate on the left side from the two leftmost points
            first = knots[0];
            second = knots[1];
        } else {
            // extrapolate on the right side from the two rightmost points
            first = knots[n - 1];
            second = knots[n - 2];
        }
        firstValue = interpolationFunction.value(first);
        secondValue = interpolationFunction.value(second);
        double extrapolatedValue = (firstValue - secondValue) / (first - second) * (v - first) + firstValue;
        // clamp the extrapolated value to [0.5, 2] * firstValue
        if (extrapolatedValue > 2 * firstValue) { extrapolatedValue = 2 * firstValue; }
        if (extrapolatedValue < 0.5 * firstValue) { extrapolatedValue = 0.5 * firstValue; }
        return extrapolatedValue;
    }
}
Just sharing a complete example based on the answer provided by Joni:
import java.util.Arrays;
import org.apache.commons.math3.analysis.interpolation.LinearInterpolator;
import org.apache.commons.math3.analysis.polynomials.PolynomialFunction;
import org.apache.commons.math3.analysis.polynomials.PolynomialSplineFunction;

public class App {

    public static void main(String[] args) {
        double[] x1 = { 1, 2, 3, 4, 5 };
        double[] y1 = { 2, 4, 8, 16, 32 };
        double[] x2 = { 6, 7 };
        double[] res = interpolateLinear(x1, y1, x2);
        for (int i = 0; i < res.length; i++) {
            System.out.println("Value: " + x2[i] + " => extrapolation: " + res[i]);
        }
    }

    public static double[] interpolateLinear(double[] x1, double[] y1, double[] x2) {
        final PolynomialSplineFunction function = new LinearInterpolator().interpolate(x1, y1);
        final PolynomialFunction[] splines = function.getPolynomials();
        final PolynomialFunction firstFunction = splines[0];
        final PolynomialFunction lastFunction = splines[splines.length - 1];
        final double[] knots = function.getKnots();
        final double firstKnot = knots[0];
        final double lastKnot = knots[knots.length - 1];
        double[] resultList = Arrays.stream(x2).map(aDouble -> {
            if (aDouble > lastKnot) {
                return lastFunction.value(aDouble - knots[knots.length - 2]);
            } else if (aDouble < firstKnot) {
                return firstFunction.value(aDouble - knots[0]);
            }
            return function.value(aDouble);
        }).toArray();
        return resultList;
    }
}
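For the data above the last spline segment runs from (4, 16) to (5, 32), i.e. slope 16, so this should print something like:

Value: 6.0 => extrapolation: 48.0
Value: 7.0 => extrapolation: 64.0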
