Use of mpicbg.trakem2.transform.TransformMeshMapping in project TrakEM2 by trakem2.
Class Patch, method keyPressed():
@Override
public void keyPressed(final KeyEvent ke) {
    final Object source = ke.getSource();
    if (!(source instanceof DisplayCanvas))
        return;
    final DisplayCanvas dc = (DisplayCanvas) source;
    final Roi roi = dc.getFakeImagePlus().getRoi();
    final int mod = ke.getModifiers();
    switch (ke.getKeyCode()) {
        case KeyEvent.VK_C:
            // Ignoring masks: outside is already black, and ImageJ cannot handle alpha masks.
            if (0 == (mod ^ (Event.SHIFT_MASK | Event.ALT_MASK))) {
                // Place the source image, untransformed, into clipboard:
                final ImagePlus imp = getImagePlus();
                if (null != imp)
                    imp.copy(false);
            } else if (0 == mod || (0 == (mod ^ Event.SHIFT_MASK))) {
                CoordinateTransformList<CoordinateTransform> list = null;
                if (hasCoordinateTransform()) {
                    list = new CoordinateTransformList<CoordinateTransform>();
                    list.add(getCoordinateTransform());
                }
                if (0 == mod) {
                    // SHIFT is not down
                    final AffineModel2D am = new AffineModel2D();
                    am.set(this.at);
                    if (null == list)
                        list = new CoordinateTransformList<CoordinateTransform>();
                    list.add(am);
                }
                ImageProcessor ip;
                if (null != list) {
                    final TransformMesh mesh = new TransformMesh(list, meshResolution, o_width, o_height);
                    final TransformMeshMapping mapping = new TransformMeshMapping(mesh);
                    ip = mapping.createMappedImageInterpolated(getImageProcessor());
                } else {
                    ip = getImageProcessor();
                }
                new ImagePlus(this.title, ip).copy(false);
            }
            ke.consume();
            break;
        case KeyEvent.VK_F:
            // Fill the alpha mask with the current ROI, using the foreground color value
            if (null != roi && M.isAreaROI(roi)) {
                Bureaucrat.createAndStart(new Worker.Task("Filling image mask") {
                    @Override
                    public void exec() {
                        getLayerSet().addDataEditStep(Patch.this);
                        if (0 == mod) {
                            addAlphaMask(roi, ProjectToolbar.getForegroundColorValue());
                        } else if (0 == (mod ^ Event.SHIFT_MASK)) {
                            // shift is down: fill outside
                            try {
                                final Area localRoi = M.areaInInts(M.getArea(roi)).createTransformedArea(at.createInverse());
                                final Area invLocalRoi = new Area(new Rectangle(0, 0, getOWidth(), getOHeight()));
                                invLocalRoi.subtract(localRoi);
                                addAlphaMaskLocal(invLocalRoi, ProjectToolbar.getForegroundColorValue());
                            } catch (final NoninvertibleTransformException e) {
                                IJError.print(e);
                                return;
                            }
                        }
                        getLayerSet().addDataEditStep(Patch.this);
                        // wait
                        try {
                            updateMipMaps().get();
                        } catch (final Throwable t) {
                            IJError.print(t);
                        }
                        Display.repaint();
                    }
                }, project);
            }
            // Consume the key event so it is not processed further:
            ke.consume();
            break;
        default:
            super.keyPressed(ke);
            break;
    }
}
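
The VK_C branch above shows the basic forward-mapping pattern behind every usage on this page: concatenate the Patch's coordinate transform and world affine into a CoordinateTransformList, wrap it in a TransformMesh over the source dimensions, and let TransformMeshMapping resample the pixels. The condensed sketch below is illustrative only; the method name flattenToWorld is hypothetical, and it is written as if it lived inside Patch so it can reuse the same fields and calls as the code above.

// Hypothetical sketch (not part of TrakEM2), written as if it were a Patch method:
// resample this Patch's pixels through its coordinate transform and world affine.
ImageProcessor flattenToWorld() {
    final CoordinateTransformList<CoordinateTransform> list = new CoordinateTransformList<CoordinateTransform>();
    if (hasCoordinateTransform())
        list.add(getCoordinateTransform());       // non-linear part, if any
    final AffineModel2D am = new AffineModel2D();
    am.set(this.at);                              // the Patch's world affine
    list.add(am);
    final TransformMesh mesh = new TransformMesh(list, meshResolution, o_width, o_height);
    final TransformMeshMapping mapping = new TransformMeshMapping(mesh);
    return mapping.createMappedImageInterpolated(getImageProcessor());
}
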
Use of mpicbg.trakem2.transform.TransformMeshMapping in project TrakEM2 by trakem2.
Class Patch, method getArea():
/**
 * Returns an Area in world coordinates representing the inside of this Patch. Pixels whose alpha mask value is zero are considered outside.
 */
@Override
public Area getArea() {
    CoordinateTransform ct = null;
    if (hasAlphaMask()) {
        // Read the mask as a ROI (non-zero pixels are inside) and apply the AffineTransform to it:
        ImageProcessor alpha_mask = getAlphaMask();
        if (null == alpha_mask) {
            Utils.log2("Could not retrieve alpha mask for " + this);
        } else {
            if (hasCoordinateTransform()) {
                // must transform it
                ct = getCoordinateTransform();
                final TransformMesh mesh = new TransformMesh(ct, meshResolution, o_width, o_height);
                final TransformMeshMapping mapping = new TransformMeshMapping(mesh);
                // Without interpolation
                alpha_mask = mapping.createMappedImage(alpha_mask);
                // Keep in mind the affine of the Patch already contains the translation specified by the mesh bounds.
            }
            // Threshold all non-zero areas of the mask:
            alpha_mask.setThreshold(1, 255, ImageProcessor.NO_LUT_UPDATE);
            final ImagePlus imp = new ImagePlus("", alpha_mask);
            // TODO replace by our much faster method that scans by line, in AmiraImporter
            final ThresholdToSelection tts = new ThresholdToSelection();
            tts.setup("", imp);
            tts.run(alpha_mask);
            final Roi roi = imp.getRoi();
            if (null == roi) {
                // All pixels in the alpha mask have a value of zero
                return new Area();
            }
            return M.getArea(roi).createTransformedArea(this.at);
        }
    }
    // No alpha mask, or error in retrieving it:
    final int[] x = new int[o_width + o_width + o_height + o_height];
    final int[] y = new int[x.length];
    int next = 0;
    // Top edge: o_width + 1 points
    for (int i = 0; i <= o_width; i++, next++) {
        x[next] = i;
        y[next] = 0;
    }
    // Right edge: o_height points
    for (int i = 1; i <= o_height; i++, next++) {
        x[next] = o_width;
        y[next] = i;
    }
    // Bottom edge: o_width points
    for (int i = o_width - 1; i > -1; i--, next++) {
        x[next] = i;
        y[next] = o_height;
    }
    // Left edge: o_height - 1 points
    for (int i = o_height - 1; i > 0; i--, next++) {
        x[next] = 0;
        y[next] = i;
    }
    if (hasCoordinateTransform() && null == ct)
        ct = getCoordinateTransform();
    if (null != ct) {
        final CoordinateTransformList<CoordinateTransform> t = new CoordinateTransformList<CoordinateTransform>();
        t.add(ct);
        final TransformMesh mesh = new TransformMesh(ct, meshResolution, o_width, o_height);
        final Rectangle box = mesh.getBoundingBox();
        final AffineTransform aff = new AffineTransform(this.at);
        // Must correct for the inverse of the mesh translation, because the affine also includes the translation.
        aff.translate(-box.x, -box.y);
        final AffineModel2D affm = new AffineModel2D();
        affm.set(aff);
        t.add(affm);
        /*
         * WORKS FINE, but for points that fall outside the mesh, they don't get transformed!
        // Do it like Patch does it to generate the mipmap, with a mesh (and all the imprecisions of a mesh):
        final CoordinateTransformList t = new CoordinateTransformList();
        final TransformMesh mesh = new TransformMesh(this.ct, meshResolution, o_width, o_height);
        final AffineTransform aff = new AffineTransform(this.at);
        t.add(mesh);
        final AffineModel2D affm = new AffineModel2D();
        affm.set(aff);
        t.add(affm);
        */
        final double[] f = new double[] { x[0], y[0] };
        t.applyInPlace(f);
        final Path2D.Float path = new Path2D.Float(Path2D.Float.WIND_EVEN_ODD, x.length + 1);
        path.moveTo(f[0], f[1]);
        for (int i = 1; i < x.length; i++) {
            f[0] = x[i];
            f[1] = y[i];
            t.applyInPlace(f);
            path.lineTo(f[0], f[1]);
        }
        // line to last call to moveTo
        path.closePath();
        return new Area(path);
    } else {
        return new Area(new Polygon(x, y, x.length)).createTransformedArea(this.at);
    }
}
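
Since getArea() returns a java.awt.geom.Area in world coordinates, callers can hit-test world points directly against the visible (non-masked) part of the Patch. A tiny usage sketch; patch, wx and wy are assumed variables:

final Area visible = patch.getArea();           // world coordinates, zero-alpha pixels excluded
final boolean hit = visible.contains(wx, wy);   // true if the world point lies on visible pixels
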
Use of mpicbg.trakem2.transform.TransformMeshMapping in project TrakEM2 by trakem2.
Class Patch, method addAlphaMaskLocal():
/**
 * Add the given area, in local coordinates, to the alpha mask, using the given fill value.
 */
public void addAlphaMaskLocal(final Area aLocal, int value) {
    if (value < 0)
        value = 0;
    if (value > 255)
        value = 255;
    //
    CoordinateTransform ct = null;
    if (hasCoordinateTransform() && null == (ct = getCT())) {
        return;
    }
    // When the area is larger than the image, sometimes the area fails to be set at all.
    // Also, intersection accelerates calls to contains(x,y) for complex polygons.
    final Area a = new Area(new Rectangle(0, 0, (int) (width + 1), (int) (height + 1)));
    a.intersect(aLocal);
    if (M.isEmpty(a)) {
        Utils.log("ROI does not intersect the active image!");
        return;
    }
    ByteProcessor mask = getAlphaMask();
    // Use imglib to bypass all the problems with ShapeROI:
    // create a ShapeList image with a background value and the Area drawn on it with 'value'.
    final int background = (null != mask && 255 == value) ? 0 : 255;
    final ShapeList<UnsignedByteType> shapeList = new ShapeList<UnsignedByteType>(new int[] { (int) width, (int) height, 1 }, new UnsignedByteType(background));
    shapeList.addShape(a, new UnsignedByteType(value), new int[] { 0 });
    final mpicbg.imglib.image.Image<UnsignedByteType> shapeListImage = new mpicbg.imglib.image.Image<UnsignedByteType>(shapeList, shapeList.getBackground(), "mask");
    ByteProcessor rmask = (ByteProcessor) ImageJFunctions.copyToImagePlus(shapeListImage, ImagePlus.GRAY8).getProcessor();
    if (hasCoordinateTransform()) {
        // Inverse-map the mask through the coordinate transform, back into source pixel space
        final TransformMesh mesh = new TransformMesh(ct, meshResolution, o_width, o_height);
        final TransformMeshMapping mapping = new TransformMeshMapping(mesh);
        rmask = (ByteProcessor) mapping.createInverseMappedImageInterpolated(rmask);
    }
    if (null == mask) {
        // There wasn't a mask, hence just set it
        mask = rmask;
    } else {
        final byte[] b1 = (byte[]) mask.getPixels();
        final byte[] b2 = (byte[]) rmask.getPixels();
        // Whatever is not background in the new mask gets set on the old mask
        for (int i = 0; i < b1.length; i++) {
            // Skip background pixels in the new mask
            if (background == (b2[i] & 0xff))
                continue;
            // Replace the old pixel with the new pixel
            b1[i] = b2[i];
        }
    }
    setAlphaMask(mask);
}
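
A hedged usage sketch of addAlphaMaskLocal, mirroring the "fill outside" branch of the VK_F handler above: hide everything outside a rectangle given in local coordinates, then regenerate the mipmaps so the change shows up. The patch variable and the rectangle bounds are assumptions for illustration.

final Area keep = new Area(new Rectangle(100, 100, 400, 300));                            // region to keep visible
final Area hide = new Area(new Rectangle(0, 0, patch.getOWidth(), patch.getOHeight()));   // whole image
hide.subtract(keep);                                                                      // everything outside 'keep'
patch.addAlphaMaskLocal(hide, 0);                                                         // 0 = fully transparent
patch.updateMipMaps();                                                                    // returns a Future; get() blocks until rebuilt
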
Use of mpicbg.trakem2.transform.TransformMeshMapping in project TrakEM2 by trakem2.
Class Patch, method createCoordinateTransformedImage():
public final Patch.PatchImage createCoordinateTransformedImage() {
    if (!hasCoordinateTransform())
        return null;
    final CoordinateTransform ct = getCoordinateTransform();
    final ImageProcessor source = getImageProcessor();
    if (null == source)
        return null; // some error occurred
    // Utils.log2("source image dimensions: " + source.getWidth() + ", " + source.getHeight());
    final TransformMesh mesh = new TransformMesh(ct, meshResolution, o_width, o_height);
    final Rectangle box = mesh.getBoundingBox();
    /* We can calculate the exact size of the image to be rendered, so let's do it */
    // project.getLoader().releaseToFit(o_width, o_height, type, 5);
    final long b = 2 * o_width * o_height      // outside and mask source
                 + 2 * box.width * box.height  // outside and mask target
                 + 5 * o_width * o_height      // image source
                 + 5 * box.width * box.height; // image target
    project.getLoader().releaseToFit(b);
    final TransformMeshMapping mapping = new TransformMeshMapping(mesh);
    final ImageProcessorWithMasks target = mapping.createMappedMaskedImageInterpolated(source, getAlphaMask());
    // Set the LUT
    target.ip.setColorModel(source.getColorModel());
    return new PatchImage(target.ip, (ByteProcessor) target.mask, target.outside, box, true);
}
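
Stripped of the memory bookkeeping, the essential step above is the masked variant of the mesh-mapping pattern. A minimal sketch, assuming ct, source and the alpha mask are already in scope (all variable names are illustrative):

final TransformMesh mesh = new TransformMesh(ct, meshResolution, o_width, o_height);
final TransformMeshMapping mapping = new TransformMeshMapping(mesh);
final ImageProcessorWithMasks mapped = mapping.createMappedMaskedImageInterpolated(source, alphaMask);
mapped.ip.setColorModel(source.getColorModel());  // carry the source LUT over to the result
// mapped.mask and mapped.outside describe which target pixels carry valid data
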
Use of mpicbg.trakem2.transform.TransformMeshMapping in project TrakEM2 by trakem2.
Class Render, method render():
/**
 * Renders a patch, mapping its intensities [min, max] → [0, 1].
 *
 * @param patch the patch to be rendered
 * @param targetImage target pixels, specifies the target box
 * @param targetWeight target weight pixels, depending on alpha
 * @param x target box offset in world coordinates
 * @param y target box offset in world coordinates
 * @param scale target scale
 */
public static final void render(final Patch patch, final int coefficientsWidth, final int coefficientsHeight, final FloatProcessor targetImage, final FloatProcessor targetWeight, final ColorProcessor targetCoefficients, final double x, final double y, final double scale) {
    /* assemble coordinate transformations and add bounding box offset */
    final CoordinateTransformList<CoordinateTransform> ctl = new CoordinateTransformList<CoordinateTransform>();
    ctl.add(patch.getFullCoordinateTransform());
    final AffineModel2D affineScale = new AffineModel2D();
    affineScale.set(scale, 0, 0, scale, -x * scale, -y * scale);
    ctl.add(affineScale);

    /* estimate average scale and generate downsampled source */
    final int width = patch.getOWidth(), height = patch.getOHeight();
    final double s = sampleAverageScale(ctl, width, height, width / patch.getMeshResolution());
    final int mipmapLevel = bestMipmapLevel(s);
    final ImageProcessor ipMipmap = Downsampler.downsampleImageProcessor(patch.getImageProcessor(), mipmapLevel);

    /* create a target */
    final ImageProcessor tp = ipMipmap.createProcessor(targetImage.getWidth(), targetImage.getHeight());

    /* prepare and downsample alpha mask if there is one */
    final ByteProcessor bpMaskMipmap;
    final ByteProcessor bpMaskTarget;
    final ByteProcessor bpMask = patch.getAlphaMask();
    if (bpMask == null) {
        bpMaskMipmap = null;
        bpMaskTarget = null;
    } else {
        bpMaskMipmap = bpMask == null ? null : Downsampler.downsampleByteProcessor(bpMask, mipmapLevel);
        bpMaskTarget = new ByteProcessor(tp.getWidth(), tp.getHeight());
    }

    /* create coefficients map */
    final ColorProcessor cp = new ColorProcessor(ipMipmap.getWidth(), ipMipmap.getHeight());
    final int w = cp.getWidth();
    final int h = cp.getHeight();
    for (int yi = 0; yi < h; ++yi) {
        final int yc = yi * coefficientsHeight / h;
        final int ic = yc * coefficientsWidth;
        final int iyi = yi * w;
        for (int xi = 0; xi < w; ++xi)
            cp.set(iyi + xi, ic + (xi * coefficientsWidth / w) + 1);
    }

    /* attach mipmap transformation */
    final CoordinateTransformList<CoordinateTransform> ctlMipmap = new CoordinateTransformList<CoordinateTransform>();
    ctlMipmap.add(createScaleLevelTransform(mipmapLevel));
    ctlMipmap.add(ctl);

    /* create mesh */
    final CoordinateTransformMesh mesh = new CoordinateTransformMesh(ctlMipmap, patch.getMeshResolution(), ipMipmap.getWidth(), ipMipmap.getHeight());

    /* render */
    final ImageProcessorWithMasks source = new ImageProcessorWithMasks(ipMipmap, bpMaskMipmap, null);
    final ImageProcessorWithMasks target = new ImageProcessorWithMasks(tp, bpMaskTarget, null);
    final TransformMeshMappingWithMasks<TransformMesh> mapping = new TransformMeshMappingWithMasks<TransformMesh>(mesh);
    mapping.mapInterpolated(source, target, 1);
    final TransformMeshMapping<TransformMesh> coefficientsMapMapping = new TransformMeshMapping<TransformMesh>(mesh);
    coefficientsMapMapping.map(cp, targetCoefficients, 1);

    /* set alpha channel */
    final byte[] alphaPixels;
    if (bpMaskTarget != null)
        alphaPixels = (byte[]) bpMaskTarget.getPixels();
    else
        alphaPixels = (byte[]) target.outside.getPixels();

    /* convert */
    final double min = patch.getMin();
    final double max = patch.getMax();
    final double a = 1.0 / (max - min);
    final double b = 1.0 / 255.0;
    for (int i = 0; i < alphaPixels.length; ++i)
        targetImage.setf(i, (float) ((tp.getf(i) - min) * a));
    for (int i = 0; i < alphaPixels.length; ++i)
        targetWeight.setf(i, (float) ((alphaPixels[i] & 0xff) * b));
}
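
A hedged usage sketch of the method above: render a patch into a 512 x 512 target tile whose top-left corner sits at world coordinates (1024, 2048), at scale 0.5, with an 8 x 8 grid of intensity-coefficient blocks. All sizes, offsets and the patch variable are assumptions for illustration.

final FloatProcessor pixels  = new FloatProcessor(512, 512);   // receives intensities mapped to [0, 1]
final FloatProcessor weights = new FloatProcessor(512, 512);   // receives alpha-derived weights in [0, 1]
final ColorProcessor coefs   = new ColorProcessor(512, 512);   // receives 1-based coefficient-block indices
Render.render(patch, 8, 8, pixels, weights, coefs, 1024, 2048, 0.5);
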