Use of mpicbg.trakem2.util.Downsampler.Pair in project TrakEM2 by trakem2.
The class ExportUnsignedByte, method makeFlatImage.
public static final Pair<ByteProcessor, ByteProcessor> makeFlatImage(final List<Patch> patches, final Rectangle roi, final double backgroundValue, final double scale, final ImageSource fetcher) {
final ByteProcessor target = new ByteProcessor((int) (roi.width * scale), (int) (roi.height * scale));
target.setInterpolationMethod(ImageProcessor.BILINEAR);
final ByteProcessor targetMask = new ByteProcessor(target.getWidth(), target.getHeight());
targetMask.setInterpolationMethod(ImageProcessor.NEAREST_NEIGHBOR);
for (final Patch patch : patches) {
final ImageData imgd = fetcher.fetch(patch, scale);
// The affine to apply to the MipMap.image
final AffineTransform atc = new AffineTransform();
atc.scale(scale, scale);
atc.translate(-roi.x, -roi.y);
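// atc maps world coordinates into target pixels: the point is first shifted by the ROI origin, then scaled
// (with java.awt.geom.AffineTransform, the most recently concatenated operation is applied to the point first).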
final AffineTransform at = new AffineTransform();
at.preConcatenate(atc);
at.concatenate(patch.getAffineTransform());
at.scale(imgd.scaleX, imgd.scaleY);
final AffineModel2D aff = new AffineModel2D();
aff.set(at);
final CoordinateTransformMesh mesh = new CoordinateTransformMesh(aff, patch.getMeshResolution(), imgd.bp.getWidth(), imgd.bp.getHeight());
final TransformMeshMappingWithMasks<CoordinateTransformMesh> mapping = new TransformMeshMappingWithMasks<CoordinateTransformMesh>(mesh);
imgd.bp.setInterpolationMethod(ImageProcessor.BILINEAR);
// no interpolation for the alpha mask
imgd.alpha.setInterpolationMethod(ImageProcessor.NEAREST_NEIGHBOR);
mapping.map(imgd.bp, imgd.alpha, target, targetMask);
}
return new Pair<ByteProcessor, ByteProcessor>(target, targetMask);
}
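A minimal, hypothetical caller (not part of the TrakEM2 sources): patches, roi and an ImageSource instance fetcher are assumed to exist, and the Downsampler.Pair is assumed to expose its two elements as public fields a and b, as the snippets on this page do for Triple.
// Hypothetical helper, for illustration only; assumes ij.ImagePlus is available and Pair has public fields a and b.
static void showFlat8Bit(final List<Patch> patches, final Rectangle roi, final ImageSource fetcher) {
    final Pair<ByteProcessor, ByteProcessor> result =
            ExportUnsignedByte.makeFlatImage(patches, roi, 0, 0.25, fetcher);
    new ImagePlus("flat 8-bit", result.a).show();   // the flattened image
    new ImagePlus("alpha mask", result.b).show();   // the accumulated alpha mask
}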
Use of mpicbg.trakem2.util.Downsampler.Pair in project TrakEM2 by trakem2.
The class ExportUnsignedShort, method makeFlatImage.
public static final Pair<ShortProcessor, ByteProcessor> makeFlatImage(final List<Patch> patches, final Rectangle roi, final double backgroundValue, final double scale, final boolean makeAlphaMask) {
final ArrayList<PatchIntensityRange> patchIntensityRanges = new ArrayList<PatchIntensityRange>();
double min = Double.MAX_VALUE;
double max = -Double.MAX_VALUE;
for (final Displayable d : patches) {
final Patch patch = (Patch) d;
final PatchIntensityRange pir = new PatchIntensityRange(patch);
if (pir.min < min)
min = pir.min;
if (pir.max > max)
max = pir.max;
patchIntensityRanges.add(pir);
}
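// Map the pooled intensity range [min, max] linearly onto 16 bits: minI and maxI are where the
// normalized intensities 0 and 1 fall after (v - min) * 65535 / (max - min).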
final double minI = -min * 65535.0 / (max - min);
final double maxI = (1.0 - min) * 65535.0 / (max - min);
final ShortProcessor sp;
if (Double.isNaN(scale)) {
sp = new ShortProcessor(roi.width, roi.height);
} else {
sp = new ShortProcessor((int) (roi.width * scale + 0.5), (int) (roi.height * scale + 0.5));
}
sp.setMinAndMax(minI, maxI);
if (0 != backgroundValue) {
sp.setValue(backgroundValue);
sp.setRoi(0, 0, roi.width, roi.height);
sp.fill();
}
final ByteProcessor alphaTarget = makeAlphaMask ? new ByteProcessor(sp.getWidth(), sp.getHeight()) : null;
for (final PatchIntensityRange pir : patchIntensityRanges) {
map(new PatchTransform(pir), roi.x, roi.y, scale, mapIntensities(pir, min, max), sp, alphaTarget);
}
return new Pair<ShortProcessor, ByteProcessor>(sp, alphaTarget);
}
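A hypothetical caller, not from the TrakEM2 sources: per the Double.isNaN branch above, passing Double.NaN as scale keeps the ROI at full resolution, and the second Pair element is null when makeAlphaMask is false. As above, the Pair is assumed to expose public fields a and b.
// Hypothetical helper, for illustration only; assumes ij.ImagePlus is available.
static void showFlat16Bit(final List<Patch> patches, final Rectangle roi) {
    final Pair<ShortProcessor, ByteProcessor> flat =
            ExportUnsignedShort.makeFlatImage(patches, roi, 0, Double.NaN, true);
    new ImagePlus("flat 16-bit", flat.a).show();
    if (flat.b != null)
        new ImagePlus("alpha mask", flat.b).show();
}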
Use of mpicbg.trakem2.util.Downsampler.Pair in project TrakEM2 by trakem2.
The class ElasticLayerAlignment, method preAlignStack.
private void preAlignStack(final Param param, final Project project, final List<Layer> layerRange, final Rectangle box, final Filter<Patch> filter, final ArrayList<Triple<Integer, Integer, AbstractModel<?>>> pairs) {
final double scale = Math.min(1.0, Math.min((double) param.ppm.sift.maxOctaveSize / (double) box.width, (double) param.ppm.sift.maxOctaveSize / (double) box.height));
/* extract and save features, overwrite cached files if requested */
try {
AlignmentUtils.extractAndSaveLayerFeatures(layerRange, box, scale, filter, param.ppm.sift, param.ppm.clearCache, param.ppm.maxNumThreadsSift);
} catch (final Exception e) {
return;
}
/* match and filter feature correspondences */
int numFailures = 0;
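// Features were extracted on images downscaled by 'scale'; multiplying the match coordinates by
// layerScale / scale brings them into the coordinate frame used for alignment (param.layerScale).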
final double pointMatchScale = param.layerScale / scale;
for (int i = 0; i < layerRange.size(); ++i) {
final ArrayList<Thread> threads = new ArrayList<Thread>(param.maxNumThreads);
final int sliceA = i;
final Layer layerA = layerRange.get(i);
final int range = Math.min(layerRange.size(), i + param.maxNumNeighbors + 1);
final String layerNameA = layerName(layerA);
J: for (int j = i + 1; j < range; ) {
final int numThreads = Math.min(param.maxNumThreads, range - j);
final ArrayList<Triple<Integer, Integer, AbstractModel<?>>> models = new ArrayList<Triple<Integer, Integer, AbstractModel<?>>>(numThreads);
for (int k = 0; k < numThreads; ++k) models.add(null);
for (int t = 0; t < numThreads && j < range; ++t, ++j) {
final int ti = t;
final int sliceB = j;
final Layer layerB = layerRange.get(j);
final String layerNameB = layerName(layerB);
final Thread thread = new Thread() {
@Override
public void run() {
IJ.showProgress(sliceA, layerRange.size() - 1);
Utils.log("matching " + layerNameB + " -> " + layerNameA + "...");
ArrayList<PointMatch> candidates = null;
if (!param.ppm.clearCache)
candidates = mpicbg.trakem2.align.Util.deserializePointMatches(project, param.ppm, "layer", layerB.getId(), layerA.getId());
if (null == candidates) {
final ArrayList<Feature> fs1 = mpicbg.trakem2.align.Util.deserializeFeatures(project, param.ppm.sift, "layer", layerA.getId());
final ArrayList<Feature> fs2 = mpicbg.trakem2.align.Util.deserializeFeatures(project, param.ppm.sift, "layer", layerB.getId());
candidates = new ArrayList<PointMatch>(FloatArray2DSIFT.createMatches(fs2, fs1, param.ppm.rod));
/* scale the candidates */
for (final PointMatch pm : candidates) {
final Point p1 = pm.getP1();
final Point p2 = pm.getP2();
final double[] l1 = p1.getL();
final double[] w1 = p1.getW();
final double[] l2 = p2.getL();
final double[] w2 = p2.getW();
l1[0] *= pointMatchScale;
l1[1] *= pointMatchScale;
w1[0] *= pointMatchScale;
w1[1] *= pointMatchScale;
l2[0] *= pointMatchScale;
l2[1] *= pointMatchScale;
w2[0] *= pointMatchScale;
w2[1] *= pointMatchScale;
}
if (!mpicbg.trakem2.align.Util.serializePointMatches(project, param.ppm, "layer", layerB.getId(), layerA.getId(), candidates))
Utils.log("Could not store point match candidates for layers " + layerNameB + " and " + layerNameA + ".");
}
AbstractModel<?> model;
switch(param.expectedModelIndex) {
case 0:
model = new TranslationModel2D();
break;
case 1:
model = new RigidModel2D();
break;
case 2:
model = new SimilarityModel2D();
break;
case 3:
model = new AffineModel2D();
break;
case 4:
model = new HomographyModel2D();
break;
default:
return;
}
final ArrayList<PointMatch> inliers = new ArrayList<PointMatch>();
boolean modelFound;
boolean again = false;
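// Repeat RANSAC until a non-identity model is found (or none at all): if the best model is essentially
// the identity and rejectIdentity is set, its inliers are removed from the candidates and the search restarts.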
try {
do {
again = false;
modelFound = model.filterRansac(candidates, inliers, 1000, param.maxEpsilon * param.layerScale, param.minInlierRatio, param.minNumInliers, 3);
if (modelFound && param.rejectIdentity) {
final ArrayList<Point> points = new ArrayList<Point>();
PointMatch.sourcePoints(inliers, points);
if (Transforms.isIdentity(model, points, param.identityTolerance * param.layerScale)) {
IJ.log("Identity transform for " + inliers.size() + " matches rejected.");
candidates.removeAll(inliers);
inliers.clear();
again = true;
}
}
} while (again);
} catch (final NotEnoughDataPointsException e) {
modelFound = false;
}
if (modelFound) {
Utils.log(layerNameB + " -> " + layerNameA + ": " + inliers.size() + " corresponding features with an average displacement of " + (PointMatch.meanDistance(inliers) / param.layerScale) + "px identified.");
Utils.log("Estimated transformation model: " + model);
models.set(ti, new Triple<Integer, Integer, AbstractModel<?>>(sliceA, sliceB, model));
} else {
Utils.log(layerNameB + " -> " + layerNameA + ": no correspondences found.");
return;
}
}
};
threads.add(thread);
thread.start();
}
try {
for (final Thread thread : threads) thread.join();
} catch (final InterruptedException e) {
Utils.log("Establishing feature correspondences interrupted.");
for (final Thread thread : threads) thread.interrupt();
try {
for (final Thread thread : threads) thread.join();
} catch (final InterruptedException f) {
}
return;
}
threads.clear();
/* collect successfully matched pairs and break the search on gaps */
for (int t = 0; t < models.size(); ++t) {
final Triple<Integer, Integer, AbstractModel<?>> pair = models.get(t);
if (pair == null) {
if (++numFailures > param.maxNumFailures) {
break J;
}
} else {
numFailures = 0;
pairs.add(pair);
}
}
}
}
}
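For illustration only (this is not code from ElasticLayerAlignment): each Triple collected into the pairs list above couples two slice indices with the model estimated between them, using the public fields a, b and c also seen in the RegularizedAffineLayerAlignment snippet below, so the result can be inspected like this:
// Hypothetical inspection of the collected pre-alignment pairs.
for (final Triple<Integer, Integer, AbstractModel<?>> pair : pairs) {
    Utils.log("slices " + pair.a + " -> " + pair.b + ": " + pair.c);
}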
Use of mpicbg.trakem2.util.Downsampler.Pair in project TrakEM2 by trakem2.
The class RegularizedAffineLayerAlignment, method exec.
/**
* @param param alignment parameters
* @param layerRange the layers to align
* @param fixedLayers layers that are kept fixed during optimization
* @param emptyLayers layers that contain no patches
* @param box bounding box of the region to align
* @param propagateTransformBefore whether to apply the first layer's transform to all layers before the range
* @param propagateTransformAfter whether to apply the last layer's transform to all layers after the range
* @param filter filter selecting the patches to consider
* @throws Exception
*/
@SuppressWarnings({ "rawtypes", "unchecked" })
public final void exec(final Param param, final List<Layer> layerRange, final Set<Layer> fixedLayers, final Set<Layer> emptyLayers, final Rectangle box, final boolean propagateTransformBefore, final boolean propagateTransformAfter, final Filter<Patch> filter) throws Exception {
final double scale = Math.min(1.0, Math.min((double) param.ppm.sift.maxOctaveSize / (double) box.width, (double) param.ppm.sift.maxOctaveSize / (double) box.height));
final ExecutorService exec = ExecutorProvider.getExecutorService(1.0f / (float) param.maxNumThreads);
/* create tiles and models for all layers */
final ArrayList<Tile<?>> tiles = new ArrayList<Tile<?>>();
final AbstractAffineModel2D<?> m = (AbstractAffineModel2D<?>) Util.createModel(param.desiredModelIndex);
final AbstractAffineModel2D<?> r = (AbstractAffineModel2D<?>) Util.createModel(param.regularizerIndex);
for (int i = 0; i < layerRange.size(); ++i) {
if (param.regularize)
tiles.add(new Tile(new InterpolatedAffineModel2D(m.copy(), r.copy(), param.lambda)));
else
tiles.add(new Tile(m.copy()));
}
/* collect all pairs of slices for which a model could be found */
final ArrayList<Triple<Integer, Integer, Collection<PointMatch>>> pairs = new ArrayList<Triple<Integer, Integer, Collection<PointMatch>>>();
/* extract and save features, overwrite cached files if requested */
try {
AlignmentUtils.extractAndSaveLayerFeatures(layerRange, box, scale, filter, param.ppm.sift, param.ppm.clearCache, param.ppm.maxNumThreadsSift);
} catch (final Exception e) {
e.printStackTrace();
IJError.print(e);
return;
}
/* match and filter feature correspondences */
int numFailures = 0, lastA = 0;
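// Matches are computed on images downscaled by 'scale'; 1.0 / scale presumably rescales their
// coordinates back to unscaled layer space.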
final double pointMatchScale = 1.0 / scale;
final ArrayList<Future<Triple<Integer, Integer, Collection<PointMatch>>>> modelFutures = new ArrayList<Future<Triple<Integer, Integer, Collection<PointMatch>>>>();
for (int i = 0; i < layerRange.size(); ++i) {
final int range = Math.min(layerRange.size(), i + param.maxNumNeighbors + 1);
for (int j = i + 1; j < range; ++j) {
modelFutures.add(exec.submit(new CorrespondenceCallable(param, layerRange.get(i), layerRange.get(j), pointMatchScale, i, j)));
}
}
// Assume that the futures are ordered by Triple.a (the first layer index)
try {
for (final Future<Triple<Integer, Integer, Collection<PointMatch>>> future : modelFutures) {
final Triple<Integer, Integer, Collection<PointMatch>> pair = future.get();
if (lastA != pair.a) {
numFailures = 0;
lastA = pair.a;
}
if (pair.c == null) {
numFailures++;
// TODO: Cancel futures associated with pair.a
} else if (numFailures < param.maxNumFailures) {
pairs.add(pair);
}
}
} catch (final InterruptedException ie) {
Utils.log("Establishing feature correspondences interrupted.");
for (final Future<Triple<Integer, Integer, Collection<PointMatch>>> future : modelFutures) {
future.cancel(true);
}
return;
}
/* collect successfully matched pairs and break the search on gaps */
/*
for ( int t = 0; t < models.size(); ++t )
{
final Triple< Integer, Integer, Collection< PointMatch > > pair = models.get( t );
if ( pair == null )
{
if ( ++numFailures > param.maxNumFailures )
break J;
}
else
{
numFailures = 0;
pairs.add( pair );
}
}
*/
/* Optimization */
final TileConfiguration tileConfiguration = new TileConfiguration();
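// Build the tile graph: for every matched layer pair, register both tiles with the configuration
// and connect them through their point matches.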
for (final Triple<Integer, Integer, Collection<PointMatch>> pair : pairs) {
final Tile<?> t1 = tiles.get(pair.a);
final Tile<?> t2 = tiles.get(pair.b);
tileConfiguration.addTile(t1);
tileConfiguration.addTile(t2);
t2.connect(t1, pair.c);
}
for (int i = 0; i < layerRange.size(); ++i) {
final Layer layer = layerRange.get(i);
if (fixedLayers.contains(layer))
tileConfiguration.fixTile(tiles.get(i));
}
final List<Tile<?>> nonPreAlignedTiles = tileConfiguration.preAlign();
IJ.log("pre-aligned all but " + nonPreAlignedTiles.size() + " tiles");
tileConfiguration.optimize(param.maxEpsilon, param.maxIterationsOptimize, param.maxPlateauwidthOptimize);
Utils.log(new StringBuffer("Successfully optimized configuration of ").append(tiles.size()).append(" tiles:").toString());
Utils.log(" average displacement: " + String.format("%.3f", tileConfiguration.getError()) + "px");
Utils.log(" minimal displacement: " + String.format("%.3f", tileConfiguration.getMinError()) + "px");
Utils.log(" maximal displacement: " + String.format("%.3f", tileConfiguration.getMaxError()) + "px");
if (propagateTransformBefore || propagateTransformAfter) {
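// Propagate the alignment beyond the optimized range: layers before the first (or after the last)
// aligned layer receive the transform of that boundary layer.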
final Layer first = layerRange.get(0);
final List<Layer> layers = first.getParent().getLayers();
if (propagateTransformBefore) {
final AffineTransform b = translateAffine(box, ((Affine2D<?>) tiles.get(0).getModel()).createAffine());
final int firstLayerIndex = first.getParent().getLayerIndex(first.getId());
for (int i = 0; i < firstLayerIndex; ++i) applyTransformToLayer(layers.get(i), b, filter);
}
if (propagateTransformAfter) {
final Layer last = layerRange.get(layerRange.size() - 1);
final AffineTransform b = translateAffine(box, ((Affine2D<?>) tiles.get(tiles.size() - 1).getModel()).createAffine());
final int lastLayerIndex = last.getParent().getLayerIndex(last.getId());
for (int i = lastLayerIndex + 1; i < layers.size(); ++i) applyTransformToLayer(layers.get(i), b, filter);
}
}
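// Apply each tile's optimized model to the patches of its layer; translateAffine presumably
// compensates for the bounding box offset.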
for (int i = 0; i < layerRange.size(); ++i) {
final AffineTransform b = translateAffine(box, ((Affine2D<?>) tiles.get(i).getModel()).createAffine());
applyTransformToLayer(layerRange.get(i), b, filter);
}
Utils.log("Done.");
}