Use of mpicbg.models.AffineModel2D in project TrakEM2 by trakem2.
The class Distortion_Correction, method extractSIFTPoints:
protected void extractSIFTPoints(final int index, final List<Feature>[] siftFeatures, final List<List<PointMatch>> inliers, final List<AbstractAffineModel2D<?>> models) {
    // save all matching candidates
    final List<List<PointMatch>> candidates = new ArrayList<List<PointMatch>>();
    for (int j = 0; j < siftFeatures.length; j++) {
        if (index == j)
            continue;
        candidates.add(FloatArray2DSIFT.createMatches(siftFeatures[index], siftFeatures[j], 1.5f, null, Float.MAX_VALUE, 0.5f));
    }
    // get rid of the outliers and save the transformations to match the inliers
    for (int i = 0; i < candidates.size(); ++i) {
        final List<PointMatch> tmpInliers = new ArrayList<PointMatch>();
        final AbstractAffineModel2D<?> m;
        switch (sp.expectedModelIndex) {
            case 0:
                m = new TranslationModel2D();
                break;
            case 1:
                m = new RigidModel2D();
                break;
            case 2:
                m = new SimilarityModel2D();
                break;
            case 3:
                m = new AffineModel2D();
                break;
            default:
                return;
        }
        try {
            m.filterRansac(candidates.get(i), tmpInliers, 1000, sp.maxEpsilon, sp.minInlierRatio, 10);
        } catch (final NotEnoughDataPointsException e) {
            e.printStackTrace();
        }
        inliers.add(tmpInliers);
        models.add(m);
    }
}
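The model-selection switch followed by filterRansac is the core pattern here: pick a transformation class by index, then let RANSAC split the SIFT correspondences into inliers and outliers. Below is a minimal, self-contained sketch of that pattern, assuming a recent mpicbg where Point takes double[] coordinates (older releases used float[]); the correspondences, the (10, 5) shift, and the thresholds are invented for the demo and are not part of TrakEM2.

import java.util.ArrayList;
import java.util.List;

import mpicbg.models.AffineModel2D;
import mpicbg.models.NotEnoughDataPointsException;
import mpicbg.models.Point;
import mpicbg.models.PointMatch;

public class RansacDemo {
    public static void main(final String[] args) {
        // Synthetic correspondences: targets are the sources shifted by (10, 5),
        // plus one gross outlier that RANSAC should reject.
        final double[][] xy = { { 0, 0 }, { 100, 0 }, { 0, 100 }, { 100, 100 }, { 50, 20 }, { 20, 80 }, { 80, 60 }, { 60, 40 } };
        final List<PointMatch> candidates = new ArrayList<PointMatch>();
        for (final double[] c : xy)
            candidates.add(new PointMatch(
                    new Point(new double[] { c[0], c[1] }),
                    new Point(new double[] { c[0] + 10, c[1] + 5 })));
        candidates.add(new PointMatch(
                new Point(new double[] { 50, 50 }),
                new Point(new double[] { 500, -300 })));
        final AffineModel2D m = new AffineModel2D();
        final List<PointMatch> inliers = new ArrayList<PointMatch>();
        try {
            // Same call shape as above: 1000 iterations, 2.5 px error cap,
            // 0.1 minimal inlier ratio, at least 7 inliers required.
            final boolean found = m.filterRansac(candidates, inliers, 1000, 2.5f, 0.1f, 7);
            System.out.println("model found: " + found + ", inliers: " + inliers.size() + " of " + candidates.size());
        } catch (final NotEnoughDataPointsException e) {
            // Thrown when there are fewer candidates than the model needs (3 for affine).
            e.printStackTrace();
        }
    }
}

Note that on failure the method above still adds the empty tmpInliers list and the unfit model to the output lists, keeping the indices of inliers and models aligned with the candidate lists.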
Use of mpicbg.models.AffineModel2D in project TrakEM2 by trakem2.
The class AlignTask, method transformVectorData:
public static final void transformVectorData(
        final ReferenceData rd, /* The transformations of patches before alignment. */
        final Collection<Displayable> vdata, /* The VectorData instances to transform along with images. */
        final LayerSet target_layerset) /* The LayerSet in which the vdata and the transformed images exist. */
{
    final ExecutorService exec = Utils.newFixedThreadPool("AlignTask-transformVectorData");
    try {
        final Collection<Future<?>> fus = new ArrayList<Future<?>>();
        final HashMap<Long, Layer> lidm = new HashMap<Long, Layer>();
        for (final Long lid : rd.src_layer_lids_used) {
            final Layer la = target_layerset.getLayer(lid.longValue());
            if (null == la) {
                Utils.log("ERROR layer with id " + lid + " NOT FOUND in target layerset!");
                continue;
            }
            lidm.put(lid, la);
        }
        for (final Map.Entry<Displayable, Map<Long, TreeMap<Integer, Long>>> ed : rd.underlying.entrySet()) {
            // The VectorData instance to transform
            final Displayable d = ed.getKey();
            // Process Displayables concurrently:
            fus.add(exec.submit(new Runnable() {
                @SuppressWarnings({ "rawtypes", "unchecked" })
                @Override
                public void run() {
                    for (final Map.Entry<Long, TreeMap<Integer, Long>> el : ed.getValue().entrySet()) {
                        // The entry has the id of the layer and the stack-index-ordered list of Patch that intersect VectorData d in that Layer
                        final Layer layer = lidm.get(el.getKey());
                        if (null == layer) {
                            Utils.log("ERROR layer with id " + el.getKey() + " NOT FOUND in target layerset!");
                            continue;
                        }
                        // Utils.log("Editing Displayable " + d + " at layer " + layer);
                        // list of Patch ids affecting VectorData/Displayable d
                        final ArrayList<Long> pids = new ArrayList<Long>(el.getValue().values());
                        // so now Patch ids are sorted from top to bottom
                        Collections.reverse(pids);
                        // The area already processed in the layer
                        final Area used_area = new Area();
                        // The map of areas vs transforms for each area to apply to the VectorData, to its data within the layer only
                        final VectorDataTransform vdt = new VectorDataTransform(layer);
                        // The list of transforms to apply to each VectorData
                        for (final long pid : pids) {
                            // Find the Patch with id 'pid' in Layer 'la' of the target LayerSet:
                            final DBObject ob = layer.findById(pid);
                            if (null == ob || !(ob instanceof Patch)) {
                                Utils.log("ERROR layer with id " + layer.getId() + " DOES NOT CONTAIN a Patch with id " + pid);
                                continue;
                            }
                            final Patch patch = (Patch) ob;
                            // no need to synch, read only from now on
                            final Patch.TransformProperties props = rd.tp.get(pid);
                            if (null == props) {
                                Utils.log("ERROR: could not find any Patch.TransformProperties for patch " + patch);
                                continue;
                            }
                            final Area a = new Area(props.area);
                            a.subtract(used_area);
                            if (M.isEmpty(a)) {
                                // skipping fully occluded Patch
                                continue;
                            }
                            // Accumulate:
                            used_area.add(props.area);
                            // For the remaining area within this Layer, define a transform
                            // Generate a CoordinateTransformList that includes:
                            // 1 - an inverted transform from Patch coords to world coords
                            // 2 - the CoordinateTransform of the Patch, if any
                            // 3 - the AffineTransform of the Patch
                            //
                            // The idea is to first send the data from world to pixel space of the Patch, using the old transforms,
                            // and then from pixel space of the Patch to world, using the new transforms.
                            final CoordinateTransformList tlist = new CoordinateTransformList();
                            // 1. Inverse of the old affine: from world into the old patch mipmap
                            final mpicbg.models.AffineModel2D aff_inv = new mpicbg.models.AffineModel2D();
                            try {
                                aff_inv.set(props.at.createInverse());
                            } catch (final NoninvertibleTransformException nite) {
                                Utils.log("ERROR: could not invert the affine transform for Patch " + patch);
                                IJError.print(nite);
                                continue;
                            }
                            tlist.add(aff_inv);
                            // 2. Inverse of the old coordinate transform of the Patch: from old mipmap to pixels in original image
                            if (null != props.ct) {
                                // The props.ct is a CoordinateTransform, not necessarily an InvertibleCoordinateTransform
                                // So the mesh is necessary to ensure the invertibility
                                final mpicbg.trakem2.transform.TransformMesh mesh = new mpicbg.trakem2.transform.TransformMesh(props.ct, props.meshResolution, props.o_width, props.o_height);
                                /* // Apparently not needed; the inverse affine in step 1 took care of it.
                                 * // (the affine of step 1 includes the mesh translation)
                                Rectangle box = mesh.getBoundingBox();
                                AffineModel2D aff = new AffineModel2D();
                                aff.set(new AffineTransform(1, 0, 0, 1, box.x, box.y));
                                tlist.add(aff);
                                */
                                tlist.add(new InverseICT(mesh));
                            }
                            // 3. New coordinate transform of the Patch: from original image to new mipmap
                            final mpicbg.trakem2.transform.CoordinateTransform ct = patch.getCoordinateTransform();
                            if (null != ct) {
                                tlist.add(ct);
                                final mpicbg.trakem2.transform.TransformMesh mesh = new mpicbg.trakem2.transform.TransformMesh(ct, patch.getMeshResolution(), patch.getOWidth(), patch.getOHeight());
                                // correct for mesh bounds -- Necessary because it comes from the other side, and the removal of the translation here is re-added by the affine in step 4!
                                final Rectangle box = mesh.getBoundingBox();
                                final AffineModel2D aff = new AffineModel2D();
                                aff.set(new AffineTransform(1, 0, 0, 1, -box.x, -box.y));
                                tlist.add(aff);
                            }
                            // 4. New affine transform of the Patch: from mipmap to world
                            final mpicbg.models.AffineModel2D new_aff = new mpicbg.models.AffineModel2D();
                            new_aff.set(patch.getAffineTransform());
                            tlist.add(new_aff);
                            /*
                            // TODO Consider caching the tlist for each Patch, or for a few thousand of them maximum.
                            // But it could blow up memory astronomically.
                            // The old part:
                            final mpicbg.models.InvertibleCoordinateTransformList old = new mpicbg.models.InvertibleCoordinateTransformList();
                            if (null != props.ct) {
                                mpicbg.trakem2.transform.TransformMesh mesh = new mpicbg.trakem2.transform.TransformMesh(props.ct, props.meshResolution, props.o_width, props.o_height);
                                old.add(mesh);
                            }
                            final mpicbg.models.AffineModel2D old_aff = new mpicbg.models.AffineModel2D();
                            old_aff.set(props.at);
                            old.add(old_aff);
                            tlist.add(new InverseICT(old));
                            // The new part:
                            final mpicbg.models.AffineModel2D new_aff = new mpicbg.models.AffineModel2D();
                            new_aff.set(patch.getAffineTransform());
                            tlist.add(new_aff);
                            final mpicbg.trakem2.transform.CoordinateTransform ct = patch.getCoordinateTransform();
                            if (null != ct) tlist.add(ct);
                            */
                            vdt.add(a, tlist);
                        }
                        // Apply the map of area vs tlist for the data section of d within the layer:
                        try {
                            ((VectorData) d).apply(vdt);
                        } catch (final Exception t) {
                            Utils.log("ERROR transformation failed for " + d + " at layer " + layer);
                            IJError.print(t);
                        }
                    }
                }
            }));
        }
        Utils.wait(fus);
        Display.repaint();
    } finally {
        exec.shutdown();
    }
}
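The heart of this method is the CoordinateTransformList assembled in steps 1-4: a point is first sent from world coordinates into the pixel space of the Patch using the inverted old transforms, then back out to world coordinates using the new ones. Here is a minimal sketch of that round trip using only the two affine steps (a real Patch may add the mesh-based steps 2 and 3); the two placements are invented for the demo, and a recent mpicbg is assumed where CoordinateTransform works on double[] (older releases used float[]).

import java.awt.geom.AffineTransform;
import java.awt.geom.NoninvertibleTransformException;

import mpicbg.models.AffineModel2D;
import mpicbg.models.CoordinateTransform;
import mpicbg.models.CoordinateTransformList;

public class RoundTripDemo {
    public static void main(final String[] args) throws NoninvertibleTransformException {
        // Pretend "old" placement of the patch: translate by (100, 50).
        final AffineTransform oldAt = AffineTransform.getTranslateInstance(100, 50);
        // Pretend "new" placement after alignment: translate by (120, 40).
        final AffineTransform newAt = AffineTransform.getTranslateInstance(120, 40);

        final AffineModel2D affInv = new AffineModel2D();
        affInv.set(oldAt.createInverse()); // step 1: world -> old patch pixels
        final AffineModel2D affNew = new AffineModel2D();
        affNew.set(newAt);                 // step 4: patch pixels -> new world

        final CoordinateTransformList<CoordinateTransform> tlist =
                new CoordinateTransformList<CoordinateTransform>();
        tlist.add(affInv);
        tlist.add(affNew);

        final double[] world = new double[] { 110.0, 60.0 }; // a point in old world coords
        tlist.applyInPlace(world);
        // (110,60) -> patch-local (10,10) -> new world (130,50)
        System.out.println("moved to: " + world[0] + ", " + world[1]);
    }
}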
Use of mpicbg.models.AffineModel2D in project TrakEM2 by trakem2.
The class Align, method alignLayersLinearly:
/**
* Align a range of layers by accumulating pairwise alignments of contiguous layers.
*
* @param layers The range of layers to align pairwise.
* @param numThreads The number of threads to use.
* @param filter The {@link Filter} to decide which {@link Patch} instances to use in each {@link Layer}. Can be null.
*/
public static final void alignLayersLinearly(final List<Layer> layers, final int numThreads, final Filter<Patch> filter) {
    param.sift.maxOctaveSize = 1600;
    if (!param.setup("Align layers linearly"))
        return;
    final Rectangle box = layers.get(0).getParent().getMinimalBoundingBox(Patch.class);
    final double scale = Math.min(1.0, Math.min((double) param.sift.maxOctaveSize / box.width, (double) param.sift.maxOctaveSize / box.height));
    final Param p = param.clone();
    p.maxEpsilon *= scale;
    final FloatArray2DSIFT sift = new FloatArray2DSIFT(p.sift);
    final SIFT ijSIFT = new SIFT(sift);
    Rectangle box1 = null;
    Rectangle box2 = null;
    final Collection<Feature> features1 = new ArrayList<Feature>();
    final Collection<Feature> features2 = new ArrayList<Feature>();
    final List<PointMatch> candidates = new ArrayList<PointMatch>();
    final List<PointMatch> inliers = new ArrayList<PointMatch>();
    final AffineTransform a = new AffineTransform();
    int i = 0;
    for (final Layer l : layers) {
        long s = System.currentTimeMillis();
        features1.clear();
        features1.addAll(features2);
        features2.clear();
        final Rectangle box3 = l.getMinimalBoundingBox(Patch.class);
        if (box3 == null)
            continue;
        box1 = box2;
        box2 = box3;
        final List<Patch> patches = l.getAll(Patch.class);
        if (null != filter) {
            for (final Iterator<Patch> it = patches.iterator(); it.hasNext(); ) {
                if (!filter.accept(it.next()))
                    it.remove();
            }
        }
        ijSIFT.extractFeatures(l.getProject().getLoader().getFlatImage(l, box2, scale, 0xffffffff, ImagePlus.GRAY8, Patch.class, patches, true).getProcessor(), features2);
        Utils.log(features2.size() + " features extracted in layer \"" + l.getTitle() + "\" (took " + (System.currentTimeMillis() - s) + " ms).");
        if (features1.size() > 0) {
            s = System.currentTimeMillis();
            candidates.clear();
            FeatureTransform.matchFeatures(features2, features1, candidates, p.rod);
            final AbstractAffineModel2D<?> model;
            switch (p.expectedModelIndex) {
                case 0:
                    model = new TranslationModel2D();
                    break;
                case 1:
                    model = new RigidModel2D();
                    break;
                case 2:
                    model = new SimilarityModel2D();
                    break;
                case 3:
                    model = new AffineModel2D();
                    break;
                default:
                    return;
            }
            boolean modelFound;
            boolean again = false;
            try {
                do {
                    again = false;
                    modelFound = model.filterRansac(candidates, inliers, 1000, p.maxEpsilon, p.minInlierRatio, p.minNumInliers, 3);
                    if (modelFound && p.rejectIdentity) {
                        final ArrayList<Point> points = new ArrayList<Point>();
                        PointMatch.sourcePoints(inliers, points);
                        if (Transforms.isIdentity(model, points, p.identityTolerance)) {
                            Utils.log("Identity transform for " + inliers.size() + " matches rejected.");
                            candidates.removeAll(inliers);
                            inliers.clear();
                            again = true;
                        }
                    }
                } while (again);
            } catch (final NotEnoughDataPointsException e) {
                modelFound = false;
            }
            if (modelFound) {
                Utils.log("Model found for layer \"" + l.getTitle() + "\" and its predecessor:\n correspondences " + inliers.size() + " of " + candidates.size() + "\n average residual error " + (model.getCost() / scale) + " px\n took " + (System.currentTimeMillis() - s) + " ms");
                final AffineTransform b = new AffineTransform();
                b.translate(box1.x, box1.y);
                b.scale(1.0f / scale, 1.0f / scale);
                b.concatenate(model.createAffine());
                b.scale(scale, scale);
                b.translate(-box2.x, -box2.y);
                a.concatenate(b);
                l.apply(Displayable.class, a);
                Display.repaint(l);
            } else {
                Utils.log("No model found for layer \"" + l.getTitle() + "\" and its predecessor:\n correspondence candidates " + candidates.size() + "\n took " + (System.currentTimeMillis() - s) + " ms");
                a.setToIdentity();
            }
        }
        IJ.showProgress(++i, layers.size());
    }
}
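One detail worth unpacking is the AffineTransform b: SIFT ran on flat images of box1 and box2 rendered at scale, so model.createAffine() maps scaled box2-local coordinates to scaled box1-local coordinates, and the surrounding translate/scale calls turn it into a world-to-world transform. Below is a standalone check of that concatenation order, using only java.awt.geom; the scale, boxes, and the stand-in model are invented for the demo.

import java.awt.Rectangle;
import java.awt.geom.AffineTransform;

public class ScaleCompensationDemo {
    public static void main(final String[] args) {
        final double scale = 0.5;
        final Rectangle box1 = new Rectangle(10, 20, 800, 600);
        final Rectangle box2 = new Rectangle(30, 40, 800, 600);
        // Stands in for model.createAffine(): a translation by (3, -2) in scaled space.
        final AffineTransform model = AffineTransform.getTranslateInstance(3, -2);

        final AffineTransform b = new AffineTransform();
        b.translate(box1.x, box1.y);
        b.scale(1.0 / scale, 1.0 / scale);
        b.concatenate(model);
        b.scale(scale, scale);
        b.translate(-box2.x, -box2.y);

        // A point at box2's origin in world coordinates:
        final double[] src = { box2.x, box2.y };
        final double[] dst = new double[2];
        b.transform(src, 0, dst, 0, 1);
        // Applied right to left: (30,40) -> local (0,0) -> scaled (0,0)
        // -> model (3,-2) -> unscaled (6,-4) -> +box1 = (16,16)
        System.out.println(dst[0] + ", " + dst[1]);
    }
}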
Use of mpicbg.models.AffineModel2D in project TrakEM2 by trakem2.
The class Align, method alignTileCollections:
/**
 * Align two collections of tiles.
 *
 * @param p the alignment parameters
 * @param a the tiles to transform, aligned onto b
 * @param b the reference tiles
 */
public static final void alignTileCollections(final Param p, final Collection<AbstractAffineTile2D<?>> a, final Collection<AbstractAffineTile2D<?>> b) {
    final ArrayList<Patch> pa = new ArrayList<Patch>();
    final ArrayList<Patch> pb = new ArrayList<Patch>();
    for (final AbstractAffineTile2D<?> t : a)
        pa.add(t.getPatch());
    for (final AbstractAffineTile2D<?> t : b)
        pb.add(t.getPatch());
    final Layer la = pa.iterator().next().getLayer();
    final Layer lb = pb.iterator().next().getLayer();
    final Rectangle boxA = Displayable.getBoundingBox(pa, null);
    final Rectangle boxB = Displayable.getBoundingBox(pb, null);
    final double scale = Math.min(1.0, Math.min(Math.min((double) p.sift.maxOctaveSize / boxA.width, (double) p.sift.maxOctaveSize / boxA.height), Math.min((double) p.sift.maxOctaveSize / boxB.width, (double) p.sift.maxOctaveSize / boxB.height)));
    final Param pp = p.clone();
    pp.maxEpsilon *= scale;
    final FloatArray2DSIFT sift = new FloatArray2DSIFT(pp.sift);
    final SIFT ijSIFT = new SIFT(sift);
    final Collection<Feature> featuresA = new ArrayList<Feature>();
    final Collection<Feature> featuresB = new ArrayList<Feature>();
    final List<PointMatch> candidates = new ArrayList<PointMatch>();
    final List<PointMatch> inliers = new ArrayList<PointMatch>();
    long s = System.currentTimeMillis();
    ijSIFT.extractFeatures(la.getProject().getLoader().getFlatImage(la, boxA, scale, 0xffffffff, ImagePlus.GRAY8, null, pa, true, Color.GRAY).getProcessor(), featuresA);
    Utils.log(featuresA.size() + " features extracted in graph A in layer \"" + la.getTitle() + "\" (took " + (System.currentTimeMillis() - s) + " ms).");
    s = System.currentTimeMillis();
    ijSIFT.extractFeatures(lb.getProject().getLoader().getFlatImage(lb, boxB, scale, 0xffffffff, ImagePlus.GRAY8, null, pb, true, Color.GRAY).getProcessor(), featuresB);
    Utils.log(featuresB.size() + " features extracted in graph B in layer \"" + lb.getTitle() + "\" (took " + (System.currentTimeMillis() - s) + " ms).");
    if (featuresA.size() > 0 && featuresB.size() > 0) {
        s = System.currentTimeMillis();
        FeatureTransform.matchFeatures(featuresA, featuresB, candidates, pp.rod);
        final AbstractAffineModel2D<?> model;
        switch (p.expectedModelIndex) {
            case 0:
                model = new TranslationModel2D();
                break;
            case 1:
                model = new RigidModel2D();
                break;
            case 2:
                model = new SimilarityModel2D();
                break;
            case 3:
                model = new AffineModel2D();
                break;
            default:
                return;
        }
        boolean modelFound;
        boolean again = false;
        try {
            do {
                again = false;
                modelFound = model.filterRansac(candidates, inliers, 1000, p.maxEpsilon, p.minInlierRatio, p.minNumInliers, 3);
                if (modelFound && p.rejectIdentity) {
                    final ArrayList<Point> points = new ArrayList<Point>();
                    PointMatch.sourcePoints(inliers, points);
                    if (Transforms.isIdentity(model, points, p.identityTolerance)) {
                        Utils.log("Identity transform for " + inliers.size() + " matches rejected.");
                        candidates.removeAll(inliers);
                        inliers.clear();
                        again = true;
                    }
                }
            } while (again);
        } catch (final NotEnoughDataPointsException e) {
            modelFound = false;
        }
        if (modelFound) {
            Utils.log("Model found for graph A and B in layers \"" + la.getTitle() + "\" and \"" + lb.getTitle() + "\":\n correspondences " + inliers.size() + " of " + candidates.size() + "\n average residual error " + (model.getCost() / scale) + " px\n took " + (System.currentTimeMillis() - s) + " ms");
            final AffineTransform at = new AffineTransform();
            at.translate(boxA.x, boxA.y);
            at.scale(1.0f / scale, 1.0f / scale);
            at.concatenate(model.createAffine());
            at.scale(scale, scale);
            at.translate(-boxB.x, -boxB.y);
            for (final Patch t : pa)
                t.preTransform(at, false);
            Display.repaint(la);
        } else {
            Utils.log("No model found for graph A and B in layers \"" + la.getTitle() + "\" and \"" + lb.getTitle() + "\":\n correspondence candidates " + candidates.size() + "\n took " + (System.currentTimeMillis() - s) + " ms");
        }
    }
}
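The scale computed above is simply the largest factor at most 1.0 at which both bounding boxes fit inside p.sift.maxOctaveSize on either side, so the flat images handed to SIFT stay below the octave limit. A tiny standalone check of that formula; the octave size and box dimensions are invented.

public class ScaleChoiceDemo {
    public static void main(final String[] args) {
        final int maxOctaveSize = 1024;      // stand-in for p.sift.maxOctaveSize
        final int[] widths = { 3000, 2000 }; // boxA.width, boxB.width
        final int[] heights = { 1500, 2500 }; // boxA.height, boxB.height
        double scale = 1.0;
        for (int i = 0; i < 2; ++i)
            scale = Math.min(scale, Math.min(
                    (double) maxOctaveSize / widths[i],
                    (double) maxOctaveSize / heights[i]));
        System.out.println("scale = " + scale); // 1024/3000 ~ 0.341
        // Both boxes now fit within the octave limit at this scale:
        System.out.println(3000 * scale <= maxOctaveSize && 2500 * scale <= maxOctaveSize);
    }
}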
Use of mpicbg.models.AffineModel2D in project TrakEM2 by trakem2.
The class AlignLayersTask, method alignLayersLinearlyJob:
public static final void alignLayersLinearlyJob(final LayerSet layerSet, final int first, final int last, final boolean propagateTransform, final Rectangle fov, final Filter<Patch> filter) {
    // will reverse order if necessary
    final List<Layer> layerRange = layerSet.getLayers(first, last);
    final Align.Param p = Align.param.clone();
    // find the first non-empty layer, and remove all empty layers
    Rectangle box = fov;
    for (final Iterator<Layer> it = layerRange.iterator(); it.hasNext(); ) {
        final Layer la = it.next();
        if (!la.contains(Patch.class, true)) {
            it.remove();
            continue;
        }
        if (null == box) {
            // The first layer:
            // Only for visible patches
            box = la.getMinimalBoundingBox(Patch.class, true);
        }
    }
    if (0 == layerRange.size()) {
        Utils.log("All layers in range are empty!");
        return;
    }
    /* Do nothing if only one layer is selected */
    if (layerRange.size() < 2)
        return;
    final double scale = Math.min(1.0, Math.min((double) p.sift.maxOctaveSize / (double) box.width, (double) p.sift.maxOctaveSize / (double) box.height));
    p.maxEpsilon *= scale;
    p.identityTolerance *= scale;
    // Utils.log2("scale: " + scale + " maxOctaveSize: " + p.sift.maxOctaveSize + " box: " + box.width + "," + box.height);
    final FloatArray2DSIFT sift = new FloatArray2DSIFT(p.sift);
    final SIFT ijSIFT = new SIFT(sift);
    Rectangle box1 = fov;
    Rectangle box2 = fov;
    final Collection<Feature> features1 = new ArrayList<Feature>();
    final Collection<Feature> features2 = new ArrayList<Feature>();
    final List<PointMatch> candidates = new ArrayList<PointMatch>();
    final List<PointMatch> inliers = new ArrayList<PointMatch>();
    final AffineTransform a = new AffineTransform();
    int s = 0;
    for (final Layer layer : layerRange) {
        if (Thread.currentThread().isInterrupted())
            return;
        final long t0 = System.currentTimeMillis();
        features1.clear();
        features1.addAll(features2);
        features2.clear();
        final Rectangle box3 = layer.getMinimalBoundingBox(Patch.class, true);
        // skipping empty layer
        if (box3 == null || (box3.width == 0 && box3.height == 0))
            continue;
        box1 = null == fov ? box2 : fov;
        box2 = null == fov ? box3 : fov;
        final List<Patch> patches = layer.getAll(Patch.class);
        if (null != filter) {
            for (final Iterator<Patch> it = patches.iterator(); it.hasNext(); ) {
                if (!filter.accept(it.next()))
                    it.remove();
            }
        }
        final ImageProcessor flatImage = layer.getProject().getLoader().getFlatImage(layer, box2, scale, 0xffffffff, ImagePlus.GRAY8, Patch.class, patches, true).getProcessor();
        ijSIFT.extractFeatures(flatImage, features2);
        IJ.log(features2.size() + " features extracted in layer \"" + layer.getTitle() + "\" (took " + (System.currentTimeMillis() - t0) + " ms).");
        if (features1.size() > 0) {
            final long t1 = System.currentTimeMillis();
            candidates.clear();
            FeatureTransform.matchFeatures(features2, features1, candidates, p.rod);
            final AbstractAffineModel2D<?> model;
            switch (p.expectedModelIndex) {
                case 0:
                    model = new TranslationModel2D();
                    break;
                case 1:
                    model = new RigidModel2D();
                    break;
                case 2:
                    model = new SimilarityModel2D();
                    break;
                case 3:
                    model = new AffineModel2D();
                    break;
                default:
                    return;
            }
            final AbstractAffineModel2D<?> desiredModel;
            switch (p.desiredModelIndex) {
                case 0:
                    desiredModel = new TranslationModel2D();
                    break;
                case 1:
                    desiredModel = new RigidModel2D();
                    break;
                case 2:
                    desiredModel = new SimilarityModel2D();
                    break;
                case 3:
                    desiredModel = new AffineModel2D();
                    break;
                default:
                    return;
            }
            boolean modelFound;
            boolean again = false;
            try {
                do {
                    again = false;
                    modelFound = model.filterRansac(candidates, inliers, 1000, p.maxEpsilon, p.minInlierRatio, p.minNumInliers, 3);
                    if (modelFound && p.rejectIdentity) {
                        final ArrayList<Point> points = new ArrayList<Point>();
                        PointMatch.sourcePoints(inliers, points);
                        if (Transforms.isIdentity(model, points, p.identityTolerance)) {
                            IJ.log("Identity transform for " + inliers.size() + " matches rejected.");
                            candidates.removeAll(inliers);
                            inliers.clear();
                            again = true;
                        }
                    }
                } while (again);
                if (modelFound)
                    desiredModel.fit(inliers);
            } catch (final NotEnoughDataPointsException e) {
                modelFound = false;
            } catch (final IllDefinedDataPointsException e) {
                modelFound = false;
            }
            if (Thread.currentThread().isInterrupted())
                return;
            if (modelFound) {
                IJ.log("Model found for layer \"" + layer.getTitle() + "\" and its predecessor:\n correspondences " + inliers.size() + " of " + candidates.size() + "\n average residual error " + (model.getCost() / scale) + " px\n took " + (System.currentTimeMillis() - t1) + " ms");
                final AffineTransform b = new AffineTransform();
                b.translate(box1.x, box1.y);
                b.scale(1.0f / scale, 1.0f / scale);
                b.concatenate(desiredModel.createAffine());
                b.scale(scale, scale);
                b.translate(-box2.x, -box2.y);
                a.concatenate(b);
                AlignTask.transformPatchesAndVectorData(patches, a);
                Display.repaint(layer);
            } else {
                IJ.log("No model found for layer \"" + layer.getTitle() + "\" and its predecessor:\n correspondence candidates " + candidates.size() + "\n took " + (System.currentTimeMillis() - t1) + " ms");
                a.setToIdentity();
            }
        }
        IJ.showProgress(++s, layerRange.size());
    }
    if (Thread.currentThread().isInterrupted())
        return;
    if (propagateTransform) {
        if (last > first && last < layerSet.size() - 2)
            for (final Layer la : layerSet.getLayers(last + 1, layerSet.size() - 1)) {
                if (Thread.currentThread().isInterrupted())
                    return;
                AlignTask.transformPatchesAndVectorData(la, a);
            }
        else if (first > last && last > 0)
            for (final Layer la : layerSet.getLayers(0, last - 1)) {
                if (Thread.currentThread().isInterrupted())
                    return;
                AlignTask.transformPatchesAndVectorData(la, a);
            }
    }
}
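Unlike Align.alignLayersLinearly above, this job distinguishes p.expectedModelIndex (the model class RANSAC uses to collect inliers) from p.desiredModelIndex (the model re-fit to exactly those inliers via desiredModel.fit(inliers) and then applied). Here is a minimal sketch of that re-fit step with invented correspondences, again assuming an mpicbg where Point takes double[] coordinates.

import java.util.ArrayList;
import java.util.List;

import mpicbg.models.IllDefinedDataPointsException;
import mpicbg.models.NotEnoughDataPointsException;
import mpicbg.models.Point;
import mpicbg.models.PointMatch;
import mpicbg.models.RigidModel2D;

public class DesiredModelDemo {
    public static void main(final String[] args)
            throws NotEnoughDataPointsException, IllDefinedDataPointsException {
        // Inliers related by a pure translation of (10, -4):
        final double[][] xy = { { 0, 0 }, { 50, 0 }, { 0, 50 }, { 50, 50 } };
        final List<PointMatch> inliers = new ArrayList<PointMatch>();
        for (final double[] c : xy)
            inliers.add(new PointMatch(
                    new Point(new double[] { c[0], c[1] }),
                    new Point(new double[] { c[0] + 10, c[1] - 4 })));
        // Re-fit the model that will actually be applied, as in the job above:
        final RigidModel2D desiredModel = new RigidModel2D();
        desiredModel.fit(inliers);
        // Expect a rotation of ~0 and a translation of ~(10, -4):
        System.out.println(desiredModel.createAffine());
    }
}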