Use of mpicbg.models.TranslationModel2D in project TrakEM2 by trakem2.
The class StitchingTEM, method stitchTopLeft.
/**
* Stitch an array of patches with the upper-left rule: each patch is registered by
* phase correlation against its left and top neighbors, falling back to the default
* overlaps when the correlation cannot be trusted.
*
* @param patch the patches, in row-major grid order
* @param grid_width the number of patches per row
* @param default_bottom_top_overlap default vertical overlap between adjacent rows
* @param default_left_right_overlap default horizontal overlap between adjacent columns
* @param optimize if true, collect point matches and optimize the tile configuration; if false, place each patch directly
* @param param the phase correlation parameters
* @return a Runnable that performs the stitching when run
*/
private static Runnable stitchTopLeft(final Patch[] patch, final int grid_width, final double default_bottom_top_overlap, final double default_left_right_overlap, final boolean optimize, final PhaseCorrelationParam param) {
return new Runnable() {
@Override
public void run() {
try {
final int LEFT = 0, TOP = 1;
int prev_i = 0;
int prev = LEFT;
double[] R1 = null, R2 = null;
// for minimization:
final ArrayList<AbstractAffineTile2D<?>> al_tiles = new ArrayList<AbstractAffineTile2D<?>>();
// first patch-tile:
final TranslationModel2D first_tile_model = new TranslationModel2D();
// first_tile_model.getAffine().setTransform( patch[ 0 ].getAffineTransform() );
first_tile_model.set((float) patch[0].getAffineTransform().getTranslateX(), (float) patch[0].getAffineTransform().getTranslateY());
al_tiles.add(new TranslationTile2D(first_tile_model, patch[0]));
for (int i = 1; i < patch.length; i++) {
if (Thread.currentThread().isInterrupted()) {
return;
}
// boundary checks: don't allow displacements beyond these values
final double default_dx = default_left_right_overlap;
final double default_dy = default_bottom_top_overlap;
// for minimization:
AbstractAffineTile2D<?> tile_left = null;
AbstractAffineTile2D<?> tile_top = null;
final TranslationModel2D tile_model = new TranslationModel2D();
// tile_model.getAffine().setTransform( patch[ i ].getAffineTransform() );
tile_model.set((float) patch[i].getAffineTransform().getTranslateX(), (float) patch[i].getAffineTransform().getTranslateY());
final AbstractAffineTile2D<?> tile = new TranslationTile2D(tile_model, patch[i]);
al_tiles.add(tile);
// stitch with the one above if starting row
if (0 == i % grid_width) {
prev_i = i - grid_width;
prev = TOP;
} else {
prev_i = i - 1;
prev = LEFT;
}
if (TOP == prev) {
// compare with top only
R1 = correlate(patch[prev_i], patch[i], param.overlap, param.cc_scale, TOP_BOTTOM, default_dx, default_dy, param.min_R);
R2 = null;
tile_top = al_tiles.get(i - grid_width);
} else {
// the one on the left
R2 = correlate(patch[prev_i], patch[i], param.overlap, param.cc_scale, LEFT_RIGHT, default_dx, default_dy, param.min_R);
tile_left = al_tiles.get(i - 1);
// the one above
if (i - grid_width > -1) {
R1 = correlate(patch[i - grid_width], patch[i], param.overlap, param.cc_scale, TOP_BOTTOM, default_dx, default_dy, param.min_R);
tile_top = al_tiles.get(i - grid_width);
} else {
R1 = null;
}
}
// boundary limits: don't move by more than the small dimension of the stripe
// TODO: only the dx for left (and the dy for top) should be compared and required to be smaller or equal; the other dimension should be unbounded, for example for manually acquired, grossly out-of-grid tiles.
final int max_abs_delta;
final Rectangle box = new Rectangle();
final Rectangle box2 = new Rectangle();
// check and apply: falls back to default overlaps when getting bad results
if (TOP == prev) {
if (SUCCESS == R1[2]) {
// trust top
if (optimize)
addMatches(tile_top, tile, R1[0], R1[1]);
else {
patch[i - grid_width].getBoundingBox(box);
patch[i].setLocation(box.x + R1[0], box.y + R1[1]);
}
} else {
final Rectangle b2 = patch[i - grid_width].getBoundingBox(null);
// don't move: use default overlap
if (optimize)
addMatches(tile_top, tile, 0, b2.height - default_bottom_top_overlap);
else {
patch[i - grid_width].getBoundingBox(box);
patch[i].setLocation(box.x, box.y + b2.height - default_bottom_top_overlap);
}
}
} else {
// the one on top, if any
if (i - grid_width > -1) {
if (SUCCESS == R1[2]) {
// top is good
if (SUCCESS == R2[2]) {
// combine left and top
if (optimize) {
addMatches(tile_left, tile, R2[0], R2[1]);
addMatches(tile_top, tile, R1[0], R1[1]);
} else {
patch[i - 1].getBoundingBox(box);
patch[i - grid_width].getBoundingBox(box2);
patch[i].setLocation((box.x + R1[0] + box2.x + R2[0]) / 2, (box.y + R1[1] + box2.y + R2[1]) / 2);
}
} else {
// use top alone
if (optimize)
addMatches(tile_top, tile, R1[0], R1[1]);
else {
patch[i - grid_width].getBoundingBox(box);
patch[i].setLocation(box.x + R1[0], box.y + R1[1]);
}
}
} else {
// ignore top
if (SUCCESS == R2[2]) {
// use left alone
if (optimize)
addMatches(tile_left, tile, R2[0], R2[1]);
else {
patch[i - 1].getBoundingBox(box);
patch[i].setLocation(box.x + R2[0], box.y + R2[1]);
}
} else {
patch[prev_i].getBoundingBox(box);
patch[i - grid_width].getBoundingBox(box2);
// left not trusted, top not trusted: use a combination of defaults for both
if (optimize) {
addMatches(tile_left, tile, box.width - default_left_right_overlap, 0);
addMatches(tile_top, tile, 0, box2.height - default_bottom_top_overlap);
} else {
patch[i].setLocation(box.x + box.width - default_left_right_overlap, box2.y + box2.height - default_bottom_top_overlap);
}
}
}
} else if (SUCCESS == R2[2]) {
// use left alone (top not applicable in top row)
if (optimize)
addMatches(tile_left, tile, R2[0], R2[1]);
else {
patch[i - 1].getBoundingBox(box);
patch[i].setLocation(box.x + R2[0], box.y + R2[1]);
}
} else {
patch[prev_i].getBoundingBox(box);
// left not trusted, and top not applicable: use default overlap with left tile
if (optimize)
addMatches(tile_left, tile, box.width - default_left_right_overlap, 0);
else {
patch[i].setLocation(box.x + box.width - default_left_right_overlap, box.y);
}
}
}
if (!optimize)
Display.repaint(patch[i].getLayer(), patch[i], null, 0, false);
Utils.log2(i + ": Done patch " + patch[i]);
}
if (optimize) {
final ArrayList<AbstractAffineTile2D<?>> al_fixed_tiles = new ArrayList<AbstractAffineTile2D<?>>();
// Add locked tiles as fixed tiles, if any:
for (int i = 0; i < patch.length; i++) {
if (patch[i].isLocked2())
al_fixed_tiles.add(al_tiles.get(i));
}
if (al_fixed_tiles.isEmpty()) {
// When none, add the first one as fixed
al_fixed_tiles.add(al_tiles.get(0));
}
// Optimize iteratively tile configuration by removing bad matches
optimizeTileConfiguration(al_tiles, al_fixed_tiles, param);
for (final AbstractAffineTile2D<?> t : al_tiles) t.getPatch().setAffineTransform(t.getModel().createAffine());
}
// Remove or hide disconnected tiles
if (param.hide_disconnected || param.remove_disconnected) {
final List<Set<Tile<?>>> graphs = AbstractAffineTile2D.identifyConnectedGraphs(al_tiles);
final List<AbstractAffineTile2D<?>> interestingTiles;
// find largest graph
Set<Tile<?>> largestGraph = null;
for (final Set<Tile<?>> graph : graphs) if (largestGraph == null || largestGraph.size() < graph.size())
largestGraph = graph;
Utils.log("Size of largest stitching graph = " + largestGraph.size());
interestingTiles = new ArrayList<AbstractAffineTile2D<?>>();
for (final Tile<?> t : largestGraph) interestingTiles.add((AbstractAffineTile2D<?>) t);
if (param.hide_disconnected)
for (final AbstractAffineTile2D<?> t : al_tiles) if (!interestingTiles.contains(t))
t.getPatch().setVisible(false);
if (param.remove_disconnected)
for (final AbstractAffineTile2D<?> t : al_tiles) if (!interestingTiles.contains(t))
t.getPatch().remove(false);
}
// all
Display.repaint(patch[0].getLayer(), null, 0, true);
//
} catch (final Exception e) {
IJError.print(e);
}
}
};
}
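Below is a minimal, hypothetical sketch (not TrakEM2 code, assuming the mpicbg.models API) of the seeding pattern used above: stitchTopLeft initializes each TranslationModel2D from the patch's current affine translation and, after optimization, reads the model back as an AffineTransform via createAffine().

import java.awt.geom.AffineTransform;
import mpicbg.models.TranslationModel2D;

public class TranslationSeedSketch {
    public static void main(final String[] args) {
        // stand-in for patch.getAffineTransform() of an already placed tile
        final AffineTransform current = AffineTransform.getTranslateInstance(120.0, 80.0);
        final TranslationModel2D model = new TranslationModel2D();
        // seed the model with the current translation, as stitchTopLeft does
        model.set((float) current.getTranslateX(), (float) current.getTranslateY());
        // read the (possibly optimized) model back as an AffineTransform
        final AffineTransform result = model.createAffine();
        System.out.println("tx=" + result.getTranslateX() + ", ty=" + result.getTranslateY());
    }
}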
Use of mpicbg.models.TranslationModel2D in project TrakEM2 by trakem2.
The class ElasticLayerAlignment, method exec.
/**
* Elastically align the given range of layers.
*
* @param param alignment parameters ({@code param.useTps}: true to use TPS transforms, otherwise MLS)
* @param project the enclosing project
* @param layerRange the layers to align, in order
* @param fixedLayers layers that are kept fixed during optimization
* @param emptyLayers layers without images; excluded from mipmap regeneration
* @param box the bounding box of the region to align; defines the mesh size and the coordinate offset
* @param propagateTransformBefore if true, propagate the first layer's transform to all preceding layers
* @param propagateTransformAfter if true, propagate the last layer's transform to all following layers
* @param filter filter selecting the patches to transform
* @throws Exception
*/
@SuppressWarnings("deprecation")
public final void exec(final Param param, final Project project, final List<Layer> layerRange, final Set<Layer> fixedLayers, final Set<Layer> emptyLayers, final Rectangle box, final boolean propagateTransformBefore, final boolean propagateTransformAfter, final Filter<Patch> filter) throws Exception {
final ExecutorService service = ExecutorProvider.getExecutorService(1.0f);
/* create tiles and models for all layers */
final ArrayList<Tile<?>> tiles = new ArrayList<Tile<?>>();
for (int i = 0; i < layerRange.size(); ++i) {
switch(param.desiredModelIndex) {
case 0:
tiles.add(new Tile<TranslationModel2D>(new TranslationModel2D()));
break;
case 1:
tiles.add(new Tile<RigidModel2D>(new RigidModel2D()));
break;
case 2:
tiles.add(new Tile<SimilarityModel2D>(new SimilarityModel2D()));
break;
case 3:
tiles.add(new Tile<AffineModel2D>(new AffineModel2D()));
break;
case 4:
tiles.add(new Tile<HomographyModel2D>(new HomographyModel2D()));
break;
default:
return;
}
}
/* collect all pairs of slices for which a model could be found */
final ArrayList<Triple<Integer, Integer, AbstractModel<?>>> pairs = new ArrayList<Triple<Integer, Integer, AbstractModel<?>>>();
if (!param.isAligned) {
preAlignStack(param, project, layerRange, box, filter, pairs);
} else {
for (int i = 0; i < layerRange.size(); ++i) {
final int range = Math.min(layerRange.size(), i + param.maxNumNeighbors + 1);
for (int j = i + 1; j < range; ++j) {
pairs.add(new Triple<Integer, Integer, AbstractModel<?>>(i, j, new TranslationModel2D()));
}
}
}
/* Elastic alignment */
/* Initialization */
final TileConfiguration initMeshes = new TileConfiguration();
final int meshWidth = (int) Math.ceil(box.width * param.layerScale);
final int meshHeight = (int) Math.ceil(box.height * param.layerScale);
final ArrayList<SpringMesh> meshes = new ArrayList<SpringMesh>(layerRange.size());
for (int i = 0; i < layerRange.size(); ++i) {
meshes.add(new SpringMesh(param.resolutionSpringMesh, meshWidth, meshHeight, param.stiffnessSpringMesh, param.maxStretchSpringMesh * param.layerScale, param.dampSpringMesh));
}
// final int blockRadius = Math.max( 32, meshWidth / p.resolutionSpringMesh / 2 );
final int blockRadius = Math.max(16, mpicbg.util.Util.roundPos(param.layerScale * param.blockRadius));
Utils.log("effective block radius = " + blockRadius);
final ArrayList<Future<BlockMatchPairCallable.BlockMatchResults>> futures = new ArrayList<Future<BlockMatchPairCallable.BlockMatchResults>>(pairs.size());
for (final Triple<Integer, Integer, AbstractModel<?>> pair : pairs) {
/* free memory */
project.getLoader().releaseAll();
final SpringMesh m1 = meshes.get(pair.a);
final SpringMesh m2 = meshes.get(pair.b);
final ArrayList<Vertex> v1 = m1.getVertices();
final ArrayList<Vertex> v2 = m2.getVertices();
final Layer layer1 = layerRange.get(pair.a);
final Layer layer2 = layerRange.get(pair.b);
final boolean layer1Fixed = fixedLayers.contains(layer1);
final boolean layer2Fixed = fixedLayers.contains(layer2);
if (!(layer1Fixed && layer2Fixed)) {
final BlockMatchPairCallable bmpc = new BlockMatchPairCallable(pair, layerRange, layer1Fixed, layer2Fixed, filter, param, v1, v2, box);
futures.add(service.submit(bmpc));
}
}
for (final Future<BlockMatchPairCallable.BlockMatchResults> future : futures) {
final BlockMatchPairCallable.BlockMatchResults results = future.get();
final Collection<PointMatch> pm12 = results.pm12, pm21 = results.pm21;
final Triple<Integer, Integer, AbstractModel<?>> pair = results.pair;
final Tile<?> t1 = tiles.get(pair.a);
final Tile<?> t2 = tiles.get(pair.b);
final SpringMesh m1 = meshes.get(pair.a);
final SpringMesh m2 = meshes.get(pair.b);
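// weight each cross-layer spring by 1 / layer distance, so adjacent layers are coupled more strongly than distant neighbors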
final double springConstant = 1.0 / (pair.b - pair.a);
final boolean layer1Fixed = results.layer1Fixed;
final boolean layer2Fixed = results.layer2Fixed;
if (layer1Fixed) {
initMeshes.fixTile(t1);
} else {
if (param.useLocalSmoothnessFilter) {
Utils.log(pair.a + " > " + pair.b + ": " + pm12.size() + " candidates passed local smoothness filter.");
} else {
Utils.log(pair.a + " > " + pair.b + ": found " + pm12.size() + " correspondences.");
}
for (final PointMatch pm : pm12) {
final Vertex p1 = (Vertex) pm.getP1();
final Vertex p2 = new Vertex(pm.getP2());
p1.addSpring(p2, new Spring(0, springConstant));
m2.addPassiveVertex(p2);
}
/*
* Add the Tiles to the initialization TileConfiguration; adding a Tile
* multiple times does no harm because the TileConfiguration is
* backed by a Set.
*/
if (pm12.size() > pair.c.getMinNumMatches()) {
initMeshes.addTile(t1);
initMeshes.addTile(t2);
t1.connect(t2, pm12);
}
}
if (layer2Fixed)
initMeshes.fixTile(t2);
else {
if (param.useLocalSmoothnessFilter) {
Utils.log(pair.a + " < " + pair.b + ": " + pm21.size() + " candidates passed local smoothness filter.");
} else {
Utils.log(pair.a + " < " + pair.b + ": found " + pm21.size() + " correspondences.");
}
for (final PointMatch pm : pm21) {
final Vertex p1 = (Vertex) pm.getP1();
final Vertex p2 = new Vertex(pm.getP2());
p1.addSpring(p2, new Spring(0, springConstant));
m1.addPassiveVertex(p2);
}
/*
* Add the Tiles to the initialization TileConfiguration; adding a Tile
* multiple times does no harm because the TileConfiguration is
* backed by a Set.
*/
if (pm21.size() > pair.c.getMinNumMatches()) {
initMeshes.addTile(t1);
initMeshes.addTile(t2);
t2.connect(t1, pm21);
}
}
Utils.log(pair.a + " <> " + pair.b + " spring constant = " + springConstant);
}
/* pre-align by optimizing a piecewise linear model */
initMeshes.optimize(param.maxEpsilon * param.layerScale, param.maxIterationsSpringMesh, param.maxPlateauwidthSpringMesh);
for (int i = 0; i < layerRange.size(); ++i) meshes.get(i).init(tiles.get(i).getModel());
/* optimize the meshes */
try {
final long t0 = System.currentTimeMillis();
Utils.log("Optimizing spring meshes...");
if (param.useLegacyOptimizer) {
Utils.log(" ...using legacy optimizer...");
SpringMesh.optimizeMeshes2(meshes, param.maxEpsilon * param.layerScale, param.maxIterationsSpringMesh, param.maxPlateauwidthSpringMesh, param.visualize);
} else {
SpringMesh.optimizeMeshes(meshes, param.maxEpsilon * param.layerScale, param.maxIterationsSpringMesh, param.maxPlateauwidthSpringMesh, param.visualize);
}
Utils.log("Done optimizing spring meshes. Took " + (System.currentTimeMillis() - t0) + " ms");
} catch (final NotEnoughDataPointsException e) {
Utils.log("There were not enough data points to get the spring mesh optimizing.");
e.printStackTrace();
return;
}
/* translate relative to bounding box */
for (final SpringMesh mesh : meshes) {
for (final PointMatch pm : mesh.getVA().keySet()) {
final Point p1 = pm.getP1();
final Point p2 = pm.getP2();
final double[] l = p1.getL();
final double[] w = p2.getW();
l[0] = l[0] / param.layerScale + box.x;
l[1] = l[1] / param.layerScale + box.y;
w[0] = w[0] / param.layerScale + box.x;
w[1] = w[1] / param.layerScale + box.y;
}
}
/* free memory */
project.getLoader().releaseAll();
final Layer first = layerRange.get(0);
final List<Layer> layers = first.getParent().getLayers();
final LayerSet ls = first.getParent();
final Area infArea = AreaUtils.infiniteArea();
final List<VectorData> vectorData = new ArrayList<VectorData>();
for (final Layer layer : ls.getLayers()) {
vectorData.addAll(Utils.castCollection(layer.getDisplayables(VectorData.class, false, true), VectorData.class, true));
}
vectorData.addAll(Utils.castCollection(ls.getZDisplayables(VectorData.class, true), VectorData.class, true));
/* transfer layer transform into patch transforms and append to patches */
if (propagateTransformBefore || propagateTransformAfter) {
if (propagateTransformBefore) {
final ThinPlateSplineTransform tps = makeTPS(meshes.get(0).getVA().keySet());
final int firstLayerIndex = first.getParent().getLayerIndex(first.getId());
for (int i = 0; i < firstLayerIndex; ++i) {
applyTransformToLayer(layers.get(i), tps, filter);
for (final VectorData vd : vectorData) {
vd.apply(layers.get(i), infArea, tps);
}
}
}
if (propagateTransformAfter) {
final Layer last = layerRange.get(layerRange.size() - 1);
final CoordinateTransform ct;
if (param.useTps)
ct = makeTPS(meshes.get(meshes.size() - 1).getVA().keySet());
else {
final MovingLeastSquaresTransform2 mls = new MovingLeastSquaresTransform2();
mls.setMatches(meshes.get(meshes.size() - 1).getVA().keySet());
ct = mls;
}
final int lastLayerIndex = last.getParent().getLayerIndex(last.getId());
for (int i = lastLayerIndex + 1; i < layers.size(); ++i) {
applyTransformToLayer(layers.get(i), ct, filter);
for (final VectorData vd : vectorData) {
vd.apply(layers.get(i), infArea, ct);
}
}
}
}
for (int l = 0; l < layerRange.size(); ++l) {
IJ.showStatus("Applying transformation to patches ...");
IJ.showProgress(0, layerRange.size());
final Layer layer = layerRange.get(l);
final ThinPlateSplineTransform tps = makeTPS(meshes.get(l).getVA().keySet());
applyTransformToLayer(layer, tps, filter);
for (final VectorData vd : vectorData) {
vd.apply(layer, infArea, tps);
}
if (Thread.interrupted()) {
Utils.log("Interrupted during applying transformations to patches. No all patches have been updated. Re-generate mipmaps manually.");
}
IJ.showProgress(l + 1, layerRange.size());
}
/* update patch mipmaps */
final int firstLayerIndex;
final int lastLayerIndex;
if (propagateTransformBefore)
firstLayerIndex = 0;
else {
firstLayerIndex = first.getParent().getLayerIndex(first.getId());
}
if (propagateTransformAfter)
lastLayerIndex = layers.size() - 1;
else {
final Layer last = layerRange.get(layerRange.size() - 1);
lastLayerIndex = last.getParent().getLayerIndex(last.getId());
}
for (int i = firstLayerIndex; i <= lastLayerIndex; ++i) {
final Layer layer = layers.get(i);
if (!(emptyLayers.contains(layer) || fixedLayers.contains(layer))) {
for (final Patch patch : AlignmentUtils.filterPatches(layer, filter)) patch.updateMipMaps();
}
}
Utils.log("Done.");
}
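Below is a minimal, hypothetical sketch, assuming the double-based mpicbg.models API used above, of how a TranslationModel2D like the one stored in each Triple can be fitted to point correspondences between two layers; the fitted offset is the kind of coarse per-pair estimate the spring meshes are later initialized from.

import java.util.ArrayList;
import java.util.List;
import mpicbg.models.Point;
import mpicbg.models.PointMatch;
import mpicbg.models.TranslationModel2D;

public class LayerPairTranslationSketch {
    public static void main(final String[] args) throws Exception {
        // two hypothetical correspondences, both shifted by (5, -3) between the layers
        final List<PointMatch> matches = new ArrayList<PointMatch>();
        matches.add(new PointMatch(new Point(new double[]{0, 0}), new Point(new double[]{5, -3})));
        matches.add(new PointMatch(new Point(new double[]{10, 10}), new Point(new double[]{15, 7})));
        final TranslationModel2D model = new TranslationModel2D();
        model.fit(matches); // least-squares translation, here exactly (5, -3)
        final double[] p = {0, 0};
        model.applyInPlace(p);
        System.out.println("estimated shift: (" + p[0] + ", " + p[1] + ")");
    }
}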
Use of mpicbg.models.TranslationModel2D in project TrakEM2 by trakem2.
The class ElasticMontage, method exec.
@SuppressWarnings("deprecation")
public final void exec(final Param param, final List<Patch> patches, final Set<Patch> fixedPatches) throws Exception {
/* free memory */
patches.get(0).getProject().getLoader().releaseAll();
/* create tiles and models for all patches */
final ArrayList<AbstractAffineTile2D<?>> tiles = new ArrayList<AbstractAffineTile2D<?>>();
final ArrayList<AbstractAffineTile2D<?>> fixedTiles = new ArrayList<AbstractAffineTile2D<?>>();
Align.tilesFromPatches(param.po, patches, fixedPatches, tiles, fixedTiles);
if (!param.isAligned) {
Align.alignTiles(param.po, tiles, fixedTiles, param.tilesAreInPlace, param.maxNumThreads);
/* Apply the estimated affine transform to patches */
for (final AbstractAffineTile2D<?> t : tiles) t.getPatch().setAffineTransform(t.createAffine());
Display.update();
}
/* generate tile pairs for all by now overlapping tiles */
final ArrayList<AbstractAffineTile2D<?>[]> tilePairs = new ArrayList<AbstractAffineTile2D<?>[]>();
AbstractAffineTile2D.pairOverlappingTiles(tiles, tilePairs);
/* check if there was any pair */
if (tilePairs.size() == 0) {
Utils.log("Elastic montage could not find any overlapping patches after pre-montaging.");
return;
}
Utils.log(tilePairs.size() + " pairs of patches will be block-matched...");
/* make pairwise global models local */
final ArrayList<Triple<AbstractAffineTile2D<?>, AbstractAffineTile2D<?>, InvertibleCoordinateTransform>> pairs = new ArrayList<Triple<AbstractAffineTile2D<?>, AbstractAffineTile2D<?>, InvertibleCoordinateTransform>>();
/*
* The following casting madness is necessary to get this code compiled
* with Sun/Oracle Java 6 which otherwise generates an inconvertible
* type exception.
*
* TODO Remove as soon as this bug is fixed in Sun/Oracle javac.
*/
for (final AbstractAffineTile2D<?>[] pair : tilePairs) {
final AbstractAffineModel2D<?> m;
switch(param.po.desiredModelIndex) {
case 0:
final TranslationModel2D t = (TranslationModel2D) (Object) pair[1].getModel().createInverse();
t.concatenate((TranslationModel2D) (Object) pair[0].getModel());
m = t;
break;
case 1:
final RigidModel2D r = (RigidModel2D) (Object) pair[1].getModel().createInverse();
r.concatenate((RigidModel2D) (Object) pair[0].getModel());
m = r;
break;
case 2:
final SimilarityModel2D s = (SimilarityModel2D) (Object) pair[1].getModel().createInverse();
s.concatenate((SimilarityModel2D) (Object) pair[0].getModel());
m = s;
break;
case 3:
final AffineModel2D a = (AffineModel2D) (Object) pair[1].getModel().createInverse();
a.concatenate((AffineModel2D) (Object) pair[0].getModel());
m = a;
break;
default:
m = null;
}
pairs.add(new Triple<AbstractAffineTile2D<?>, AbstractAffineTile2D<?>, InvertibleCoordinateTransform>(pair[0], pair[1], m));
}
/* Elastic alignment */
/* Initialization */
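// springLengthSpringMesh * sqrt(3), i.e. twice the height of an equilateral triangle with that side length; used below as the vertical node spacing when sizing each mesh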
final double springTriangleHeightTwice = 2 * Math.sqrt(0.75 * param.springLengthSpringMesh * param.springLengthSpringMesh);
final ArrayList<SpringMesh> meshes = new ArrayList<SpringMesh>(tiles.size());
final HashMap<AbstractAffineTile2D<?>, SpringMesh> tileMeshMap = new HashMap<AbstractAffineTile2D<?>, SpringMesh>();
for (final AbstractAffineTile2D<?> tile : tiles) {
final double w = tile.getWidth();
final double h = tile.getHeight();
final int numX = Math.max(2, (int) Math.ceil(w / param.springLengthSpringMesh) + 1);
final int numY = Math.max(2, (int) Math.ceil(h / springTriangleHeightTwice) + 1);
final double wMesh = (numX - 1) * param.springLengthSpringMesh;
final double hMesh = (numY - 1) * springTriangleHeightTwice;
final SpringMesh mesh = new SpringMesh(numX, numY, wMesh, hMesh, param.stiffnessSpringMesh, param.maxStretchSpringMesh * param.bmScale, param.dampSpringMesh);
meshes.add(mesh);
tileMeshMap.put(tile, mesh);
}
// final int blockRadius = Math.max( 32, Util.roundPos( param.springLengthSpringMesh / 2 ) );
final int blockRadius = Math.max(Util.roundPos(16 / param.bmScale), param.bmBlockRadius);
/**
* TODO set this to something larger than the largest error of the approximate model
*/
final int searchRadius = param.bmSearchRadius;
final AbstractModel<?> localSmoothnessFilterModel = mpicbg.trakem2.align.Util.createModel(param.bmLocalModelIndex);
for (final Triple<AbstractAffineTile2D<?>, AbstractAffineTile2D<?>, InvertibleCoordinateTransform> pair : pairs) {
final AbstractAffineTile2D<?> t1 = pair.a;
final AbstractAffineTile2D<?> t2 = pair.b;
final SpringMesh m1 = tileMeshMap.get(t1);
final SpringMesh m2 = tileMeshMap.get(t2);
final ArrayList<PointMatch> pm12 = new ArrayList<PointMatch>();
final ArrayList<PointMatch> pm21 = new ArrayList<PointMatch>();
final ArrayList<Vertex> v1 = m1.getVertices();
final ArrayList<Vertex> v2 = m2.getVertices();
final String patchName1 = patchName(t1.getPatch());
final String patchName2 = patchName(t2.getPatch());
final PatchImage pi1 = t1.getPatch().createTransformedImage();
if (pi1 == null) {
Utils.log("Patch `" + patchName1 + "' failed generating a transformed image. Skipping...");
continue;
}
final PatchImage pi2 = t2.getPatch().createTransformedImage();
if (pi2 == null) {
Utils.log("Patch `" + patchName2 + "' failed generating a transformed image. Skipping...");
continue;
}
final FloatProcessor fp1 = (FloatProcessor) pi1.target.convertToFloat();
final ByteProcessor mask1 = pi1.getMask();
final FloatProcessor fpMask1 = mask1 == null ? null : scaleByte(mask1);
final FloatProcessor fp2 = (FloatProcessor) pi2.target.convertToFloat();
final ByteProcessor mask2 = pi2.getMask();
final FloatProcessor fpMask2 = mask2 == null ? null : scaleByte(mask2);
if (!fixedTiles.contains(t1)) {
BlockMatching.matchByMaximalPMCC(fp1, fp2, fpMask1, fpMask2, param.bmScale, pair.c, blockRadius, blockRadius, searchRadius, searchRadius, param.bmMinR, param.bmRodR, param.bmMaxCurvatureR, v1, pm12, new ErrorStatistic(1));
if (param.bmUseLocalSmoothnessFilter) {
Utils.log("`" + patchName1 + "' > `" + patchName2 + "': found " + pm12.size() + " correspondence candidates.");
localSmoothnessFilterModel.localSmoothnessFilter(pm12, pm12, param.bmLocalRegionSigma, param.bmMaxLocalEpsilon, param.bmMaxLocalTrust);
Utils.log("`" + patchName1 + "' > `" + patchName2 + "': " + pm12.size() + " candidates passed local smoothness filter.");
} else {
Utils.log("`" + patchName1 + "' > `" + patchName2 + "': found " + pm12.size() + " correspondences.");
}
} else {
Utils.log("Skipping fixed patch `" + patchName1 + "'.");
}
if (!fixedTiles.contains(t2)) {
BlockMatching.matchByMaximalPMCC(fp2, fp1, fpMask2, fpMask1, param.bmScale, pair.c.createInverse(), blockRadius, blockRadius, searchRadius, searchRadius, param.bmMinR, param.bmRodR, param.bmMaxCurvatureR, v2, pm21, new ErrorStatistic(1));
if (param.bmUseLocalSmoothnessFilter) {
Utils.log("`" + patchName1 + "' < `" + patchName2 + "': found " + pm21.size() + " correspondence candidates.");
localSmoothnessFilterModel.localSmoothnessFilter(pm21, pm21, param.bmLocalRegionSigma, param.bmMaxLocalEpsilon, param.bmMaxLocalTrust);
Utils.log("`" + patchName1 + "' < `" + patchName2 + "': " + pm21.size() + " candidates passed local smoothness filter.");
} else {
Utils.log("`" + patchName1 + "' < `" + patchName2 + "': found " + pm21.size() + " correspondences.");
}
} else {
Utils.log("Skipping fixed patch `" + patchName2 + "'.");
}
for (final PointMatch pm : pm12) {
final Vertex p1 = (Vertex) pm.getP1();
final Vertex p2 = new Vertex(pm.getP2());
p1.addSpring(p2, new Spring(0, 1.0f));
m2.addPassiveVertex(p2);
}
for (final PointMatch pm : pm21) {
final Vertex p1 = (Vertex) pm.getP1();
final Vertex p2 = new Vertex(pm.getP2());
p1.addSpring(p2, new Spring(0, 1.0f));
m1.addPassiveVertex(p2);
}
}
/* initialize */
for (final Map.Entry<AbstractAffineTile2D<?>, SpringMesh> entry : tileMeshMap.entrySet()) entry.getValue().init(entry.getKey().getModel());
/* optimize the meshes */
try {
final long t0 = System.currentTimeMillis();
IJ.log("Optimizing spring meshes...");
if (param.useLegacyOptimizer) {
Utils.log(" ...using legacy optimizer...");
SpringMesh.optimizeMeshes2(meshes, param.po.maxEpsilon, param.maxIterationsSpringMesh, param.maxPlateauwidthSpringMesh, param.visualize);
} else {
SpringMesh.optimizeMeshes(meshes, param.po.maxEpsilon, param.maxIterationsSpringMesh, param.maxPlateauwidthSpringMesh, param.visualize);
}
IJ.log("Done optimizing spring meshes. Took " + (System.currentTimeMillis() - t0) + " ms");
} catch (final NotEnoughDataPointsException e) {
Utils.log("There were not enough data points to get the spring mesh optimizing.");
e.printStackTrace();
return;
}
/* apply */
for (final Map.Entry<AbstractAffineTile2D<?>, SpringMesh> entry : tileMeshMap.entrySet()) {
final AbstractAffineTile2D<?> tile = entry.getKey();
if (!fixedTiles.contains(tile)) {
final Patch patch = tile.getPatch();
final SpringMesh mesh = entry.getValue();
final Set<PointMatch> matches = mesh.getVA().keySet();
Rectangle box = patch.getCoordinateTransformBoundingBox();
/* compensate for existing coordinate transform bounding box */
for (final PointMatch pm : matches) {
final Point p1 = pm.getP1();
final double[] l = p1.getL();
l[0] += box.x;
l[1] += box.y;
}
final ThinPlateSplineTransform mlt = ElasticLayerAlignment.makeTPS(matches);
patch.appendCoordinateTransform(mlt);
box = patch.getCoordinateTransformBoundingBox();
patch.getAffineTransform().setToTranslation(box.x, box.y);
patch.updateInDatabase("transform");
patch.updateBucket();
patch.updateMipMaps();
}
}
Utils.log("Done.");
}
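Below is a minimal, hypothetical sketch, assuming the mpicbg.models API, of the "make pairwise global models local" step above for the translation case: concatenating the first tile's model onto the inverse of the second tile's model yields a transform that maps the first tile's local coordinates into the second tile's local frame, which is what the block matcher is handed as pair.c.

import mpicbg.models.TranslationModel2D;

public class PairwiseLocalModelSketch {
    public static void main(final String[] args) {
        final TranslationModel2D model0 = new TranslationModel2D();
        model0.set(100, 40); // tile 0 placed at (100, 40) in world coordinates
        final TranslationModel2D model1 = new TranslationModel2D();
        model1.set(160, 40); // tile 1 placed at (160, 40) in world coordinates

        // inverse of tile 1's placement, with tile 0's placement applied first
        final TranslationModel2D local = model1.createInverse();
        local.concatenate(model0);

        final double[] p = {0, 0}; // tile 0's local origin ...
        local.applyInPlace(p);     // ... lands at (-60, 0) in tile 1's local frame
        System.out.println(p[0] + ", " + p[1]);
    }
}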
Use of mpicbg.models.TranslationModel2D in project TrakEM2 by trakem2.
The class MovingLeastSquaresTransform2, method init.
@Override
public final void init(final String data) throws NumberFormatException {
final String[] fields = data.split("\\s+");
if (fields.length > 3) {
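// expected layout: fields[0] = model name, fields[1] = dimension n, fields[2] = alpha, then (2 * n + 1) numbers per landmark (n coordinates of p, n coordinates of q, and one weight w)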
final int n = Integer.parseInt(fields[1]);
if ((fields.length - 3) % (2 * n + 1) == 0) {
final int l = (fields.length - 3) / (2 * n + 1);
if (n == 2) {
if (fields[0].equals("translation"))
model = new TranslationModel2D();
else if (fields[0].equals("rigid"))
model = new RigidModel2D();
else if (fields[0].equals("similarity"))
model = new SimilarityModel2D();
else if (fields[0].equals("affine"))
model = new AffineModel2D();
else
throw new NumberFormatException("Inappropriate parameters for " + this.getClass().getCanonicalName());
} else if (n == 3) {
if (fields[0].equals("affine"))
model = new AffineModel3D();
else
throw new NumberFormatException("Inappropriate parameters for " + this.getClass().getCanonicalName());
} else
throw new NumberFormatException("Inappropriate parameters for " + this.getClass().getCanonicalName());
alpha = Float.parseFloat(fields[2]);
p = new float[n][l];
q = new float[n][l];
w = new float[l];
int i = 2, j = 0;
while (i < fields.length - 1) {
for (int d = 0; d < n; ++d) p[d][j] = Float.parseFloat(fields[++i]);
for (int d = 0; d < n; ++d) q[d][j] = Float.parseFloat(fields[++i]);
w[j] = Float.parseFloat(fields[++i]);
++j;
}
} else
throw new NumberFormatException("Inappropriate parameters for " + this.getClass().getCanonicalName());
} else
throw new NumberFormatException("Inappropriate parameters for " + this.getClass().getCanonicalName());
}
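Below is a minimal, hypothetical sketch of the serialized form that init(String) parses, assuming the class is mpicbg.trakem2.transform.MovingLeastSquaresTransform2: a header with the model name, the dimension n and alpha, followed by (2 * n + 1) numbers per landmark.

import mpicbg.trakem2.transform.MovingLeastSquaresTransform2;

public class MlsInitSketch {
    public static void main(final String[] args) throws Exception {
        // "<model> <n> <alpha>" then, per landmark, n source coords, n target coords and a weight
        final String data = "translation 2 1.0 "
                + "0.0 0.0 5.0 3.0 1.0 "   // landmark 1: (0,0) -> (5,3), weight 1
                + "10.0 0.0 15.0 3.0 1.0"; // landmark 2: (10,0) -> (15,3), weight 1
        final MovingLeastSquaresTransform2 mls = new MovingLeastSquaresTransform2();
        mls.init(data);
        final double[] p = {0.0, 0.0};
        mls.applyInPlace(p); // expected to land at (5, 3), since both landmarks share that offset
        System.out.println(p[0] + ", " + p[1]);
    }
}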
Use of mpicbg.models.TranslationModel2D in project TrakEM2 by trakem2.
The class ManualAlignMode, method apply.
@Override
public boolean apply() {
// Check there's more than one layer
if (m.size() < 2) {
Utils.showMessage("Need more than one layer to align!");
return false;
}
// Check that the current layer is one of the layers with landmarks.
// Will be used as reference
final Layer ref_layer = display.getLayer();
if (null == m.get(ref_layer)) {
Utils.showMessage("Please scroll to a layer with landmarks,\nto be used as reference.");
return false;
}
// Check that all layers have the same number of landmarks
int n_landmarks = -1;
for (final Map.Entry<Layer, Landmarks> e : m.entrySet()) {
final Landmarks lm = e.getValue();
if (-1 == n_landmarks) {
n_landmarks = lm.points.size();
continue;
}
if (n_landmarks != lm.points.size()) {
Utils.showMessage("Can't apply: there are different amounts of landmarks per layer.\nSee the log window.");
for (final Map.Entry<Layer, Landmarks> ee : m.entrySet()) {
Utils.log(ee.getValue().points.size() + " landmarks in layer " + ee.getKey());
}
return false;
}
}
// Sort Layers by Z
final TreeMap<Layer, Landmarks> sorted = new TreeMap<Layer, Landmarks>(new Comparator<Layer>() {
@Override
public boolean equals(final Object ob) {
return this == ob;
}
@Override
public int compare(final Layer l1, final Layer l2) {
// Ascending order
final double dz = l1.getZ() - l2.getZ();
if (dz < 0)
return -1;
else if (dz > 0)
return 1;
else
return 0;
}
});
sorted.putAll(m);
int iref = 0;
for (final Layer la : sorted.keySet()) {
if (la != ref_layer)
iref++;
else
break;
}
// Ok, now ask for a model
final GenericDialog gd = new GenericDialog("Model");
gd.addChoice("Model:", Align.Param.modelStrings, Align.Param.modelStrings[1]);
gd.addCheckbox("Propagate to first layer", 0 != iref);
((Component) gd.getCheckboxes().get(0)).setEnabled(0 != iref);
gd.addCheckbox("Propagate to last layer", sorted.size() - 1 != iref);
((Component) gd.getCheckboxes().get(1)).setEnabled(sorted.size() - 1 != iref);
gd.showDialog();
if (gd.wasCanceled())
return false;
final int model_index = gd.getNextChoiceIndex();
final boolean propagate_to_first = gd.getNextBoolean();
final boolean propagate_to_last = gd.getNextBoolean();
int min;
// Create a model as desired
final AbstractAffineModel2D<?> model;
switch(model_index) {
case 0:
min = 1;
model = new TranslationModel2D();
break;
case 1:
min = 2;
model = new RigidModel2D();
break;
case 2:
min = 2;
model = new SimilarityModel2D();
break;
case 3:
min = 3;
model = new AffineModel2D();
break;
default:
Utils.log("Unknown model index!");
return false;
}
if (n_landmarks < min) {
Utils.showMessage("Need at least " + min + " landmarks for a " + Align.Param.modelStrings[model_index] + " model");
return false;
}
Bureaucrat.createAndStart(new Worker.Task("Aligning layers with landmarks") {
@Override
public void exec() {
// Find layers with landmarks, in increasing Z.
// Match in pairs.
// So, get two submaps: from ref_layer to first, and from ref_layer to last
// strictly lower Z than ref_layer
final SortedMap<Layer, Landmarks> first_chunk_ = new TreeMap<Layer, Landmarks>(sorted.headMap(ref_layer));
// .. so add ref_layer
first_chunk_.put(ref_layer, m.get(ref_layer));
// equal or larger Z than ref_layer
final SortedMap<Layer, Landmarks> second_chunk = sorted.tailMap(ref_layer);
final SortedMap<Layer, Landmarks> first_chunk;
// Reverse order of first_chunk
if (first_chunk_.size() > 1) {
final SortedMap<Layer, Landmarks> fc = new TreeMap<Layer, Landmarks>(new Comparator<Layer>() {
@Override
public boolean equals(final Object ob) {
return this == ob;
}
@Override
public int compare(final Layer l1, final Layer l2) {
// Descending order
final double dz = l2.getZ() - l1.getZ();
if (dz < 0)
return -1;
else if (dz > 0)
return 1;
else
return 0;
}
});
fc.putAll(first_chunk_);
first_chunk = fc;
} else {
first_chunk = first_chunk_;
}
final LayerSet ls = ref_layer.getParent();
final Collection<Layer> affected_layers = new HashSet<Layer>(m.keySet());
// Gather all Patch instances that will be affected
final ArrayList<Patch> patches = new ArrayList<Patch>();
for (final Layer la : m.keySet()) patches.addAll(la.getAll(Patch.class));
if (propagate_to_first && first_chunk.size() > 1) {
final Collection<Layer> affected = ls.getLayers().subList(0, ls.indexOf(first_chunk.lastKey()));
for (final Layer la : affected) {
patches.addAll(la.getAll(Patch.class));
}
affected_layers.addAll(affected);
}
if (propagate_to_last && second_chunk.size() > 1) {
final Collection<Layer> affected = ls.getLayers().subList(ls.indexOf(second_chunk.lastKey()) + 1, ls.size());
for (final Layer la : affected) {
patches.addAll(la.getAll(Patch.class));
}
}
// Transform segmentations along with patches
AlignTask.transformPatchesAndVectorData(patches, new Runnable() {
@Override
public void run() {
// Apply!
// TODO: when adding non-linear transforms, use this single line for undo instead of all below:
// (these transforms may be non-linear as well, which alter mipmaps.)
// ls.addTransformStepWithData(affected_layers);
// Setup undo:
// Find all images in the range of affected layers,
// plus all Displayable of those layers (but Patch instances in a separate DoTransforms step,
// to avoid adding a "data" undo for them, which would recreate mipmaps when undone).
// plus all ZDisplayable that paint in those layers
final HashSet<Displayable> ds = new HashSet<Displayable>();
final ArrayList<Displayable> patches = new ArrayList<Displayable>();
for (final Layer layer : affected_layers) {
for (final Displayable d : layer.getDisplayables()) {
if (d.getClass() == Patch.class) {
patches.add(d);
} else {
ds.add(d);
}
}
}
for (final ZDisplayable zd : ls.getZDisplayables()) {
for (final Layer layer : affected_layers) {
if (zd.paintsAt(layer)) {
ds.add((Displayable) zd);
break;
}
}
}
if (ds.size() > 0) {
final Displayable.DoEdits step = ls.addTransformStepWithData(ds);
if (patches.size() > 0) {
final ArrayList<DoStep> a = new ArrayList<DoStep>();
a.add(new Displayable.DoTransforms().addAll(patches));
step.addDependents(a);
}
}
if (first_chunk.size() > 1) {
final AffineTransform aff = align(first_chunk, model);
if (propagate_to_first) {
for (final Layer la : ls.getLayers().subList(0, ls.indexOf(first_chunk.lastKey()))) {
// exclusive last
la.apply(Patch.class, aff);
}
}
}
if (second_chunk.size() > 1) {
final AffineTransform aff = align(second_chunk, model);
if (propagate_to_last) {
for (final Layer la : ls.getLayers().subList(ls.indexOf(second_chunk.lastKey()) + 1, ls.size())) {
// exclusive last
la.apply(Patch.class, aff);
}
}
}
Display.repaint();
// Store current state
if (ds.size() > 0) {
final Displayable.DoEdits step2 = ls.addTransformStepWithData(ds);
if (patches.size() > 0) {
final ArrayList<DoStep> a2 = new ArrayList<DoStep>();
a2.add(new Displayable.DoTransforms().addAll(patches));
step2.addDependents(a2);
}
}
}
});
}
}, display.getProject());
return true;
}
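Below is a minimal, hypothetical sketch, assuming the mpicbg.models API, of the kind of fit that presumably happens inside align(...) for model index 0: a TranslationModel2D is fitted to the landmark correspondences and exported as the AffineTransform that la.apply(Patch.class, aff) then applies to every Patch of a layer.

import java.awt.geom.AffineTransform;
import java.util.ArrayList;
import java.util.List;
import mpicbg.models.Point;
import mpicbg.models.PointMatch;
import mpicbg.models.TranslationModel2D;

public class LandmarkFitSketch {
    public static void main(final String[] args) throws Exception {
        // hypothetical landmarks: layer coordinates paired with the reference layer's coordinates
        final List<PointMatch> matches = new ArrayList<PointMatch>();
        matches.add(new PointMatch(new Point(new double[]{12, 7}), new Point(new double[]{10, 10})));
        matches.add(new PointMatch(new Point(new double[]{52, 27}), new Point(new double[]{50, 30})));

        final TranslationModel2D model = new TranslationModel2D();
        model.fit(matches); // a translation needs only one landmark, more are averaged

        final AffineTransform aff = model.createAffine();
        System.out.println("transform to apply to the layer: " + aff);
    }
}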