use of org.apache.commons.math3.linear.DecompositionSolver in project portfolio by buchen.
the class Rebalancer method solve.
public RebalancingSolution solve() {
if (currency == null)
currency = fallbackCurrency;
if (constraintsToSolve.isEmpty()) {
// The Apache Commons Math library does not like "empty" matrices.
return RebalancingSolution.EMPTY_REBALANCING_SOLUTION;
}
int numberOfConstraints = constraintsToSolve.size();
int numberOfInvestmentVehicles = investmentVehicleIndices.size();
double[][] matrixArray = new double[Math.max(numberOfConstraints, numberOfInvestmentVehicles)][numberOfInvestmentVehicles];
// We make sure that the matrix is at least as tall as it is wide, because otherwise the Apache Commons Math library
// does not compute the full SVD.
// We need the full SVD, because we also want to compute the kernel to find out which values are ambiguous (if any).
double[] vectorArray = new double[Math.max(numberOfConstraints, numberOfInvestmentVehicles)];
for (int i = 0; i < numberOfConstraints; i++) {
RebalancingConstraint constraint = constraintsToSolve.get(i);
for (Map.Entry<InvestmentVehicle, Double> entry : constraint.linearEquation.entrySet()) {
int index = investmentVehicleIndices.get(entry.getKey());
matrixArray[i][index] = entry.getValue();
}
// the amount is a value with a factor,
vectorArray[i] = constraint.targetAmount.getAmount();
// but we don't divide by the factor here...
}
RealMatrix coefficients = new Array2DRowRealMatrix(matrixArray);
RealVector target = new ArrayRealVector(vectorArray);
// We use SVD because it gives us a least-squares solution for unsolvable linear equation systems
// and can handle singular matrices.
SingularValueDecomposition decomp = new SingularValueDecomposition(coefficients);
DecompositionSolver solver = decomp.getSolver();
RealVector solution = solver.solve(target);
Map<InvestmentVehicle, Money> solutionMap = new HashMap<>();
for (int i = 0; i < numberOfInvestmentVehicles; i++) {
// ... so we don't have to multiply by the factor here.
Money money = Money.of(currency, Math.round(solution.getEntry(i)));
solutionMap.put(investmentVehicles.get(i), money);
}
// Tolerance for "being inside the kernel"
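// = max(m, n) * (largest singular value) * 2^-50, i.e. a few units in the last place of the largest singular value, floored at sqrt(SAFE_MIN) so the tolerance never degenerates to zero.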
double tol = Math.max(Math.max(numberOfConstraints, numberOfInvestmentVehicles) * decomp.getSingularValues()[0] * 0x1.0p-50, Math.sqrt(Precision.SAFE_MIN));
RealVector testTarget = coefficients.operate(solution);
double matrixMax = Precision.SAFE_MIN;
for (int i = 0; i < numberOfConstraints; i++) for (int j = 0; j < numberOfInvestmentVehicles; j++) matrixMax = Math.max(matrixMax, Math.abs(coefficients.getEntry(i, j)));
double solutionVectorMax = Precision.SAFE_MIN;
for (int i = 0; i < numberOfInvestmentVehicles; i++) solutionVectorMax = Math.max(solutionVectorMax, Math.abs(solution.getEntry(i)));
// Due to too many constraints, there might be no exact solution. Find out which securities are
// in too many constraints.
Set<InvestmentVehicle> inexactResults = new HashSet<>();
for (int i = 0; i < numberOfConstraints; i++) {
double distance = Math.abs(testTarget.getEntry(i) - target.getEntry(i));
if (distance >= tol * matrixMax * solutionVectorMax) {
// Constraint i is not satisfied exactly. => Mark all securities in this constraint as not exact.
for (InvestmentVehicle investmentVehicle : constraintsToSolve.get(i).linearEquation.keySet()) inexactResults.add(investmentVehicle);
}
}
// Due to too few constraints, some results might be ambiguous. Find out which.
int rank = decomp.getRank();
boolean isAmbiguous = rank < investmentVehicleIndices.size();
Set<InvestmentVehicle> ambigousResults;
if (isAmbiguous) {
ambigousResults = new HashSet<>();
// Let M ∈ ℝ^{m × n} be our coefficient matrix, r the rank of M and M = U Σ Vᵀ its SVD:
// Then the kernel of M is spanned by the last n - r columns of V.
// The i-th component of the solution vector is ambiguous iff some kernel basis vector has a non-zero i-th component.
RealMatrix matrixV = decomp.getV();
List<RealVector> kernelBasis = new ArrayList<>(numberOfInvestmentVehicles - rank);
for (int columnVectorIndexV = rank; columnVectorIndexV < numberOfInvestmentVehicles; columnVectorIndexV++) kernelBasis.add(matrixV.getColumnVector(columnVectorIndexV));
for (int i = 0; i < numberOfInvestmentVehicles; i++) {
boolean isThisAmbiguous = false;
for (RealVector kernelBasisVector : kernelBasis) isThisAmbiguous = isThisAmbiguous || Math.abs(kernelBasisVector.getEntry(i)) >= tol;
if (isThisAmbiguous)
ambigousResults.add(investmentVehicles.get(i));
}
} else
ambigousResults = Collections.emptySet();
return new RebalancingSolution(solutionMap, ambigousResults, inexactResults);
}
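The padding trick and the kernel scan above can be reproduced in isolation. The following minimal sketch is illustrative only (the class name and the toy constraint are made up, not taken from the portfolio project): a single constraint x0 + x1 = 100 over two unknowns is padded with a zero row so that Commons Math returns a full 2 × 2 V; the SVD solver then yields the minimum-norm least-squares solution (50, 50), and the trailing column of V spans the kernel, marking both components as ambiguous.

import org.apache.commons.math3.linear.*;

public class SvdKernelSketch {
    public static void main(String[] args) {
        // One constraint over two unknowns, x0 + x1 = 100, padded with a zero row
        // so the matrix is square; otherwise Commons Math only returns the "thin"
        // V (n x p with p = min(m, n)) and the kernel columns would be missing.
        double[][] a = { { 1, 1 }, { 0, 0 } };
        double[] b = { 100, 0 };

        SingularValueDecomposition svd = new SingularValueDecomposition(new Array2DRowRealMatrix(a));
        DecompositionSolver solver = svd.getSolver();

        // Minimum-norm least-squares solution: (50, 50).
        RealVector x = solver.solve(new ArrayRealVector(b));
        System.out.println("solution = " + x);

        // rank = 1 < 2: the system is under-determined, so some entries are ambiguous.
        int rank = svd.getRank();

        // The last n - rank columns of V span the kernel; here roughly (0.71, -0.71),
        // so both components can be shifted jointly without violating the constraint.
        RealVector kernelVector = svd.getV().getColumnVector(rank);
        System.out.println("rank = " + rank + ", kernel basis vector = " + kernelVector);
    }
}

Without the zero padding, V would have only min(m, n) = 1 column and getColumnVector(rank) would fail, which is exactly why solve() enlarges the matrix.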
use of org.apache.commons.math3.linear.DecompositionSolver in project imagingbook-calibrate by imagingbook.
the class RadialDistortionEstimator method estimateLensDistortion.
/**
* Estimates the lens distortion from multiple views, starting from an initial (linear) camera model.
* @param cam the initial (linear) camera model
* @param views a sequence of extrinsic view transformations
* @param modelPts the set of 2D model points (on the planar calibration target)
* @param obsPts a sequence of 2D image point sets, one set for each view
* @return a vector of lens distortion coefficients
*/
protected double[] estimateLensDistortion(Camera cam, ViewTransform[] views, Point2D[] modelPts, Point2D[][] obsPts) {
// the number of views
final int M = views.length;
// the number of model points
final int N = modelPts.length;
final double uc = cam.getUc();
final double vc = cam.getVc();
RealMatrix D = MatrixUtils.createRealMatrix(2 * M * N, 2);
RealVector d = new ArrayRealVector(2 * M * N);
int l = 0;
for (int i = 0; i < M; i++) {
Point2D[] obs = obsPts[i];
for (int j = 0; j < N; j++) {
// determine the radius in the ideal image plane
double[] xy = cam.projectNormalized(views[i], modelPts[j]);
double x = xy[0];
double y = xy[1];
double r2 = x * x + y * y;
double r4 = r2 * r2;
// project model point to image
double[] uv = cam.project(views[i], modelPts[j]);
double u = uv[0];
double v = uv[1];
// distance to the estimated projection center
double du = u - uc;
double dv = v - vc;
D.setEntry(l * 2 + 0, 0, du * r2);
D.setEntry(l * 2 + 0, 1, du * r4);
D.setEntry(l * 2 + 1, 0, dv * r2);
D.setEntry(l * 2 + 1, 1, dv * r4);
// observed image point
Point2D UV = obs[j];
double U = UV.getX();
double V = UV.getY();
d.setEntry(l * 2 + 0, U - u);
d.setEntry(l * 2 + 1, V - v);
l++;
}
}
DecompositionSolver solver = new SingularValueDecomposition(D).getSolver();
RealVector k = solver.solve(d);
return k.toArray();
}
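To make the structure of the linear system explicit: each observed point contributes the two rows (du·r², du·r⁴) and (dv·r², dv·r⁴) to D, the right-hand side d holds the observed radial displacements, and the SVD solver returns the least-squares estimate of k = (k1, k2). The sketch below reproduces only that pattern with synthetic data; the class name and every numeric value are invented for illustration and do not come from imagingbook-calibrate.

import org.apache.commons.math3.linear.*;

public class RadialDistortionFitSketch {
    public static void main(String[] args) {
        // Ground-truth coefficients used to synthesise the observations.
        double k1 = 0.1, k2 = -0.05;
        // Assumed projection centre (principal point) in pixel coordinates.
        double uc = 320, vc = 240;

        // Made-up normalized points (x, y) and projected pixel positions (u, v).
        double[][] norm = { { 0.1, 0.2 }, { -0.3, 0.1 }, { 0.25, -0.15 }, { -0.2, -0.3 } };
        double[][] pix  = { { 400, 300 }, { 100, 310 }, { 500, 150 }, { 180, 60 } };

        RealMatrix D = MatrixUtils.createRealMatrix(2 * norm.length, 2);
        RealVector d = new ArrayRealVector(2 * norm.length);

        for (int j = 0; j < norm.length; j++) {
            double r2 = norm[j][0] * norm[j][0] + norm[j][1] * norm[j][1];
            double r4 = r2 * r2;
            double du = pix[j][0] - uc;
            double dv = pix[j][1] - vc;
            // Two rows of the design matrix per observed point.
            D.setEntry(2 * j, 0, du * r2);
            D.setEntry(2 * j, 1, du * r4);
            D.setEntry(2 * j + 1, 0, dv * r2);
            D.setEntry(2 * j + 1, 1, dv * r4);
            // Right-hand side: the displacement caused by radial distortion.
            d.setEntry(2 * j, du * (k1 * r2 + k2 * r4));
            d.setEntry(2 * j + 1, dv * (k1 * r2 + k2 * r4));
        }

        // Least-squares solve via SVD, the same call pattern as estimateLensDistortion().
        DecompositionSolver solver = new SingularValueDecomposition(D).getSolver();
        RealVector k = solver.solve(d);
        System.out.println("estimated k1 = " + k.getEntry(0) + ", k2 = " + k.getEntry(1)); // ~0.1 and ~-0.05
    }
}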
use of org.apache.commons.math3.linear.DecompositionSolver in project neqsim by equinor.
the class sysNewtonRhapsonPhaseEnvelope2 method solve.
/**
* <p>
* solve.
* </p>
*
* @param np an int
*/
public void solve(int np) {
RealMatrix dx;
iter = 0;
do {
iter++;
init();
setfvec();
setJac();
DecompositionSolver solver2 = new LUDecomposition(Jac).getSolver();
dx = solver2.solve(fvec);
u = u.subtract(dx);
if (iter > 6) {
logger.info("iter > " + iter);
calcInc(np);
solve(np);
break;
}
logger.info("feilen: " + dx.getNorm() / u.getNorm());
} while (dx.getNorm() / u.getNorm() > 1.e-10 && !Double.isNaN(dx.getNorm()));
logger.info("iter: " + iter);
init();
}
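The body of the loop is a plain Newton step: assemble the residual vector and the Jacobian, solve J · dx = f with an LU decomposition, and update u ← u − dx until the relative step size is small. A minimal stand-alone sketch of that pattern (the two-equation toy system and the class name are invented, not taken from neqsim):

import org.apache.commons.math3.linear.*;

public class NewtonLuSketch {
    // Solve f(x, y) = (x^2 + y^2 - 4, x*y - 1) = 0 with a plain Newton iteration.
    public static void main(String[] args) {
        RealVector u = new ArrayRealVector(new double[] { 2.0, 0.3 }); // initial guess

        for (int iter = 1; iter <= 20; iter++) {
            double x = u.getEntry(0), y = u.getEntry(1);

            // Residual vector f(u) and Jacobian J(u).
            RealVector fvec = new ArrayRealVector(new double[] { x * x + y * y - 4, x * y - 1 });
            RealMatrix jac = new Array2DRowRealMatrix(new double[][] { { 2 * x, 2 * y }, { y, x } });

            // Newton step: solve J * dx = f, then u <- u - dx (the same pattern as solve(np)).
            DecompositionSolver solver = new LUDecomposition(jac).getSolver();
            RealVector dx = solver.solve(fvec);
            u = u.subtract(dx);

            if (dx.getNorm() / u.getNorm() < 1.e-10) {
                System.out.println("converged after " + iter + " iterations: " + u);
                break;
            }
        }
    }
}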
use of org.apache.commons.math3.linear.DecompositionSolver in project knime-core by knime.
the class Learner method irlsRls.
/**
* Do an IRLS (iteratively reweighted least squares) step. The result is stored in beta.
*
* @param data iterator over the training data
* @param beta parameter vector
* @param rC regressors count
* @param tcC target category count
* @throws CanceledExecutionException when the method is cancelled
*/
private void irlsRls(final RegressionTrainingData data, final RealMatrix beta, final int rC, final int tcC, final ExecutionMonitor exec) throws CanceledExecutionException {
Iterator<RegressionTrainingRow> iter = data.iterator();
long rowCount = 0;
int dim = (rC + 1) * (tcC - 1);
RealMatrix xTwx = new Array2DRowRealMatrix(dim, dim);
RealMatrix xTyu = new Array2DRowRealMatrix(dim, 1);
RealMatrix x = new Array2DRowRealMatrix(1, rC + 1);
RealMatrix eBetaTx = new Array2DRowRealMatrix(1, tcC - 1);
RealMatrix pi = new Array2DRowRealMatrix(1, tcC - 1);
final long totalRowCount = data.getRowCount();
while (iter.hasNext()) {
rowCount++;
RegressionTrainingRow row = iter.next();
exec.checkCanceled();
exec.setProgress(rowCount / (double) totalRowCount, "Row " + rowCount + "/" + totalRowCount);
x.setEntry(0, 0, 1);
x.setSubMatrix(row.getParameter().getData(), 0, 1);
for (int k = 0; k < tcC - 1; k++) {
RealMatrix betaITx = x.multiply(beta.getSubMatrix(0, 0, k * (rC + 1), (k + 1) * (rC + 1) - 1).transpose());
eBetaTx.setEntry(0, k, Math.exp(betaITx.getEntry(0, 0)));
}
double sumEBetaTx = 0;
for (int k = 0; k < tcC - 1; k++) {
sumEBetaTx += eBetaTx.getEntry(0, k);
}
for (int k = 0; k < tcC - 1; k++) {
double pik = eBetaTx.getEntry(0, k) / (1 + sumEBetaTx);
pi.setEntry(0, k, pik);
}
// fill the diagonal blocks of matrix xTwx (k = k')
for (int k = 0; k < tcC - 1; k++) {
for (int i = 0; i < rC + 1; i++) {
for (int ii = i; ii < rC + 1; ii++) {
int o = k * (rC + 1);
double v = xTwx.getEntry(o + i, o + ii);
double w = pi.getEntry(0, k) * (1 - pi.getEntry(0, k));
v += x.getEntry(0, i) * w * x.getEntry(0, ii);
xTwx.setEntry(o + i, o + ii, v);
xTwx.setEntry(o + ii, o + i, v);
}
}
}
// fill the rest of xTwx (k != k')
for (int k = 0; k < tcC - 1; k++) {
for (int kk = k + 1; kk < tcC - 1; kk++) {
for (int i = 0; i < rC + 1; i++) {
for (int ii = i; ii < rC + 1; ii++) {
int o1 = k * (rC + 1);
int o2 = kk * (rC + 1);
double v = xTwx.getEntry(o1 + i, o2 + ii);
double w = -pi.getEntry(0, k) * pi.getEntry(0, kk);
v += x.getEntry(0, i) * w * x.getEntry(0, ii);
xTwx.setEntry(o1 + i, o2 + ii, v);
xTwx.setEntry(o1 + ii, o2 + i, v);
xTwx.setEntry(o2 + ii, o1 + i, v);
xTwx.setEntry(o2 + i, o1 + ii, v);
}
}
}
}
int g = (int) row.getTarget();
// fill matrix xTyu
for (int k = 0; k < tcC - 1; k++) {
for (int i = 0; i < rC + 1; i++) {
int o = k * (rC + 1);
double v = xTyu.getEntry(o + i, 0);
double y = k == g ? 1 : 0;
v += (y - pi.getEntry(0, k)) * x.getEntry(0, i);
xTyu.setEntry(o + i, 0, v);
}
}
}
if (m_penaltyTerm > 0.0) {
RealMatrix stdError = getStdErrorMatrix(xTwx);
// do not penalize the constant terms
for (int i = 0; i < tcC - 1; i++) {
stdError.setEntry(i * (rC + 1), i * (rC + 1), 0);
}
xTwx = xTwx.add(stdError.scalarMultiply(-0.00001));
}
exec.checkCanceled();
b = xTwx.multiply(beta.transpose()).add(xTyu);
A = xTwx;
if (rowCount < A.getColumnDimension()) {
throw new IllegalStateException("The dataset must have at least " + A.getColumnDimension() + " rows, but it has only " + rowCount + " rows. It is recommended to use a " + "larger dataset in order to increase accuracy.");
}
DecompositionSolver solver = new QRDecomposition(A).getSolver();
boolean isNonSingular = solver.isNonSingular();
if (isNonSingular) {
RealMatrix betaNew = solver.solve(b);
beta.setSubMatrix(betaNew.transpose().getData(), 0, 0);
} else {
throw new RuntimeException(FAILING_MSG);
}
}
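After the accumulation loop, the update itself is a single linear solve: A = XᵀWX, b = XᵀWXβ + Xᵀ(y − π), and betaNew = A⁻¹b, guarded by the QR solver's singularity check. The sketch below isolates just that solve-and-check step; the 3 × 3 matrix is an arbitrary stand-in for XᵀWX, not KNIME data.

import org.apache.commons.math3.linear.*;

public class QrIrlsSolveSketch {
    public static void main(String[] args) {
        // A small symmetric positive-definite matrix standing in for A = X^T W X.
        RealMatrix a = new Array2DRowRealMatrix(new double[][] {
                { 4, 1, 0 },
                { 1, 3, 1 },
                { 0, 1, 2 } });
        // A column vector standing in for b = X^T W X beta + X^T (y - pi).
        RealMatrix b = new Array2DRowRealMatrix(new double[][] { { 1 }, { 2 }, { 3 } });

        DecompositionSolver solver = new QRDecomposition(a).getSolver();
        if (solver.isNonSingular()) {
            // One IRLS-style update: betaNew = A^{-1} b.
            RealMatrix betaNew = solver.solve(b);
            System.out.println("betaNew = " + betaNew);
        } else {
            // Mirrors the learner's behaviour: refuse to update on a singular system.
            throw new IllegalStateException("X^T W X is singular; cannot perform the IRLS update");
        }
    }
}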
use of org.apache.commons.math3.linear.DecompositionSolver in project knime-core by knime.
the class Learner method irlsRls.
/**
* Do an IRLS (iteratively reweighted least squares) step. The result is stored in beta.
*
* @param data iterator over the training data
* @param beta parameter vector
* @param rC regressors count
* @param tcC target category count
* @throws CanceledExecutionException when the method is cancelled
*/
private void irlsRls(final RegressionTrainingData data, final RealMatrix beta, final int rC, final int tcC, final ExecutionMonitor exec) throws CanceledExecutionException {
Iterator<RegressionTrainingRow> iter = data.iterator();
long rowCount = 0;
int dim = (rC + 1) * (tcC - 1);
RealMatrix xTwx = new Array2DRowRealMatrix(dim, dim);
RealMatrix xTyu = new Array2DRowRealMatrix(dim, 1);
RealMatrix x = new Array2DRowRealMatrix(1, rC + 1);
RealMatrix eBetaTx = new Array2DRowRealMatrix(1, tcC - 1);
RealMatrix pi = new Array2DRowRealMatrix(1, tcC - 1);
final long totalRowCount = data.getRowCount();
while (iter.hasNext()) {
rowCount++;
RegressionTrainingRow row = iter.next();
exec.checkCanceled();
exec.setProgress(rowCount / (double) totalRowCount, "Row " + rowCount + "/" + totalRowCount);
x.setEntry(0, 0, 1);
x.setSubMatrix(row.getParameter().getData(), 0, 1);
for (int k = 0; k < tcC - 1; k++) {
RealMatrix betaITx = x.multiply(beta.getSubMatrix(0, 0, k * (rC + 1), (k + 1) * (rC + 1) - 1).transpose());
eBetaTx.setEntry(0, k, Math.exp(betaITx.getEntry(0, 0)));
}
double sumEBetaTx = 0;
for (int k = 0; k < tcC - 1; k++) {
sumEBetaTx += eBetaTx.getEntry(0, k);
}
for (int k = 0; k < tcC - 1; k++) {
double pik = eBetaTx.getEntry(0, k) / (1 + sumEBetaTx);
pi.setEntry(0, k, pik);
}
// fill the diagonal blocks of matrix xTwx (k = k')
for (int k = 0; k < tcC - 1; k++) {
for (int i = 0; i < rC + 1; i++) {
for (int ii = i; ii < rC + 1; ii++) {
int o = k * (rC + 1);
double v = xTwx.getEntry(o + i, o + ii);
double w = pi.getEntry(0, k) * (1 - pi.getEntry(0, k));
v += x.getEntry(0, i) * w * x.getEntry(0, ii);
xTwx.setEntry(o + i, o + ii, v);
xTwx.setEntry(o + ii, o + i, v);
}
}
}
// fill the rest of xTwx (k != k')
for (int k = 0; k < tcC - 1; k++) {
for (int kk = k + 1; kk < tcC - 1; kk++) {
for (int i = 0; i < rC + 1; i++) {
for (int ii = i; ii < rC + 1; ii++) {
int o1 = k * (rC + 1);
int o2 = kk * (rC + 1);
double v = xTwx.getEntry(o1 + i, o2 + ii);
double w = -pi.getEntry(0, k) * pi.getEntry(0, kk);
v += x.getEntry(0, i) * w * x.getEntry(0, ii);
xTwx.setEntry(o1 + i, o2 + ii, v);
xTwx.setEntry(o1 + ii, o2 + i, v);
xTwx.setEntry(o2 + ii, o1 + i, v);
xTwx.setEntry(o2 + i, o1 + ii, v);
}
}
}
}
int g = (int) row.getTarget();
// fill matrix xTyu
for (int k = 0; k < tcC - 1; k++) {
for (int i = 0; i < rC + 1; i++) {
int o = k * (rC + 1);
double v = xTyu.getEntry(o + i, 0);
double y = k == g ? 1 : 0;
v += (y - pi.getEntry(0, k)) * x.getEntry(0, i);
xTyu.setEntry(o + i, 0, v);
}
}
}
if (m_penaltyTerm > 0.0) {
RealMatrix stdError = getStdErrorMatrix(xTwx);
// do not penalize the constant terms
for (int i = 0; i < tcC - 1; i++) {
stdError.setEntry(i * (rC + 1), i * (rC + 1), 0);
}
xTwx = xTwx.add(stdError.scalarMultiply(-0.00001));
}
exec.checkCanceled();
b = xTwx.multiply(beta.transpose()).add(xTyu);
A = xTwx;
if (rowCount < A.getColumnDimension()) {
throw new IllegalStateException("The dataset must have at least " + A.getColumnDimension() + " rows, but it has only " + rowCount + " rows. It is recommended to use a " + "larger dataset in order to increase accuracy.");
}
DecompositionSolver solver = new SingularValueDecomposition(A).getSolver();
// boolean isNonSingular = solver.isNonSingular();
// if (isNonSingular) {
RealMatrix betaNew = solver.solve(b);
beta.setSubMatrix(betaNew.transpose().getData(), 0, 0);
// } else {
// throw new RuntimeException(FAILING_MSG);
// }
}
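The only substantive difference from the previous variant is the solver: the QR decomposition and its isNonSingular() guard are replaced by an SVD solver, which still produces a minimum-norm least-squares solution when XᵀWX is rank deficient instead of refusing to solve. A small illustrative contrast on a deliberately singular matrix (all values invented):

import org.apache.commons.math3.linear.*;

public class SvdVsQrSketch {
    public static void main(String[] args) {
        // Rank-deficient matrix: the second row is twice the first.
        RealMatrix a = new Array2DRowRealMatrix(new double[][] { { 1, 2 }, { 2, 4 } });
        RealVector b = new ArrayRealVector(new double[] { 3, 6 });

        // The QR solver flags the system as singular and would throw on solve() ...
        DecompositionSolver qr = new QRDecomposition(a).getSolver();
        System.out.println("QR non-singular? " + qr.isNonSingular()); // false

        // ... while the SVD solver falls back to the minimum-norm least-squares solution.
        DecompositionSolver svd = new SingularValueDecomposition(a).getSolver();
        RealVector x = svd.solve(b);
        System.out.println("SVD solution = " + x); // ~ (0.6, 1.2)
    }
}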