Use of gov.sandia.n2a.linear.FactorQR in project n2a by frothga.
The example below is taken from the class OptimizerLM, method step.
public boolean step() {
    if (iteration == -2) {
        iteration = -1;
        return true;
    }
    // Collect results of first job. This establishes number of rows in Jacobian.
    if (iteration == -1) {
        OutputParser.Column series = getSeries(baseIndex - 1);
        int m = series.startRow + series.values.size();
        y = new MatrixDense(m, 1);
        for (int r = 0; r < m; r++) y.set(r, series.get(r));
        ynorm = y.norm(2);
        iteration = 0;
        // Collect the base sample. Note that in subsequent iterations, the base sample will come from the last step of lmpar.
        sample = -1;
        return true;
    }
    if (iteration > maxIterations)
        return false;
    sample++;
    int n = variables.size();
    // Collect remaining samples needed to build Jacobian.
    if (sample < n)
        return true;
    // If all samples have completed, build Jacobian.
    int m = y.rows();
    // Only done once per iteration.
    if (J == null) {
        J = new MatrixDense(m, n);
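        // Each column of J is a forward-difference estimate of the output's sensitivity to one variable:
        // (perturbed series - base series) / h, where h is the step size recorded for that variable.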
        for (int c = 0; c < n; c++) {
            OutputParser.Column series = getSeries(baseIndex + c);
            double h = H.get(c);
            double norm = 0;
            for (int r = 0; r < m; r++) {
                double value = (series.get(r) - y.get(r)) / h;
                J.set(r, c, value);
                norm += value * value;
            }
            Jnorms.set(c, Math.sqrt(norm));
        }
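        // Jnorms now holds the Euclidean norm of each Jacobian column; these feed the variable
        // scaling below and the gradient convergence test later in this method.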
        if (iteration == 0) {
            // Scale according to the norms of the columns of the initial Jacobian.
            for (int j = 0; j < n; j++) {
                double scale = Jnorms.get(j);
                if (scale == 0)
                    scale = 1;
                scales.set(j, scale);
            }
            xnorm = ((MatrixDense) x.multiplyElementwise(scales)).norm(2);
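            // Presumably delta is the initial step bound (trust-region radius) later used by the lmpar search, mirroring the MINPACK LM setup.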
            if (xnorm == 0)
                delta = 1;
            else
                delta = xnorm;
        }
    }
    // Factorize J
    FactorQR qr = new FactorQR(J);
    MatrixDense Qy = qr.getQ().transpose().multiply(y);
    // Exact convergence (unlikely).
    if (ynorm == 0)
        return false;
    double gnorm = 0;
    for (int j = 0; j < n; j++) {
        double jnorm = Jnorms.get(j);
        if (jnorm == 0)
            continue;
        // equivalent to ~J * y using the original J. (That is, ~R * ~Q * y, where J = QR)
        double temp = J.getColumn(j).dot(Qy);
        // infinity norm of g = ~J * y / |y| with some additional scaling
        gnorm = Math.max(gnorm, Math.abs(temp / (ynorm * jnorm)));
    }
    // Gradient has gotten too small to follow.
    if (gnorm <= toleranceG)
        return false;
    // rescale if necessary
    for (int j = 0; j < n; j++) scales.set(j, Math.max(scales.get(j), Jnorms.get(j)));
    // Inner loop of algorithm
    // Search over possible step sizes until we find one that gives acceptable improvement
    // STOPPED HERE --------------------------------------
    iteration++;
    return true;
}
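For reference, here is a minimal, self-contained sketch of the same FactorQR pattern outside the optimizer. It uses only the calls visible in the snippet above (the FactorQR(MatrixDense) constructor, getQ(), and the MatrixDense set/transpose/multiply/getColumn/dot methods); the class name FactorQRSketch, the toy data, and the assumption that MatrixDense also lives in gov.sandia.n2a.linear are illustrative, not taken from n2a.

import gov.sandia.n2a.linear.FactorQR;
import gov.sandia.n2a.linear.MatrixDense;  // assumed package; adjust if MatrixDense lives elsewhere in n2a

public class FactorQRSketch {
    public static void main(String[] args) {
        // Small over-determined system: 4 rows (residuals), 2 columns (variables).
        MatrixDense J = new MatrixDense(4, 2);
        MatrixDense y = new MatrixDense(4, 1);
        for (int r = 0; r < 4; r++) {
            J.set(r, 0, 1);      // constant column
            J.set(r, 1, r);      // linearly increasing column
            y.set(r, 2 * r + 1); // observed values
        }

        // Factorize J = QR, then form ~Q * y, as OptimizerLM.step() does above.
        FactorQR qr = new FactorQR(J);
        MatrixDense Qy = qr.getQ().transpose().multiply(y);

        // Combine Qy with the columns of J, as the optimizer does when estimating the gradient norm.
        for (int j = 0; j < 2; j++) {
            double g = J.getColumn(j).dot(Qy);
            System.out.println("column " + j + " -> " + g);
        }
    }
}

As in step(), the product qr.getQ().transpose().multiply(y) is computed once and then reused for every column in the loop that follows it.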