Use of java.awt.image.ColorModel in project intellij-community by JetBrains.
Class ImageEditorUI, method updateInfo().
private void updateInfo() {
ImageDocument document = imageComponent.getDocument();
BufferedImage image = document.getValue();
if (image != null) {
ColorModel colorModel = image.getColorModel();
String format = document.getFormat();
if (format == null) {
format = editor != null ? ImagesBundle.message("unknown.format") : "";
} else {
format = format.toUpperCase(Locale.ENGLISH);
}
VirtualFile file = editor != null ? editor.getFile() : null;
infoLabel.setText(ImagesBundle.message("image.info", image.getWidth(), image.getHeight(), format, colorModel.getPixelSize(), file != null ? StringUtil.formatFileSize(file.getLength()) : ""));
} else {
infoLabel.setText(null);
}
}
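For reference, the width, height and bit-depth values that updateInfo() feeds into its status message come straight from the BufferedImage and its ColorModel. Below is a minimal, self-contained sketch of that query; the class name and the chosen image type are illustrative, not part of the IntelliJ code.

import java.awt.image.BufferedImage;
import java.awt.image.ColorModel;

public class PixelSizeDemo {
    public static void main(String[] args) {
        // TYPE_INT_ARGB packs 8 bits each of alpha, red, green and blue.
        BufferedImage image = new BufferedImage(64, 48, BufferedImage.TYPE_INT_ARGB);
        ColorModel colorModel = image.getColorModel();
        // Prints "64x48, 32 bpp" - the same kind of values the editor
        // formats into its "image.info" label.
        System.out.printf("%dx%d, %d bpp%n",
                image.getWidth(), image.getHeight(), colorModel.getPixelSize());
    }
}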
Use of java.awt.image.ColorModel in project jdk8u_jdk by JetBrains.
Class XDataTransferer, method getDefaultImageTypeSpecifier().
private ImageTypeSpecifier getDefaultImageTypeSpecifier() {
if (defaultSpecifier == null) {
ColorModel model = ColorModel.getRGBdefault();
WritableRaster raster = model.createCompatibleWritableRaster(10, 10);
BufferedImage bufferedImage = new BufferedImage(model, raster, model.isAlphaPremultiplied(), null);
defaultSpecifier = new ImageTypeSpecifier(bufferedImage);
}
return defaultSpecifier;
}
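The snippet above lazily builds an ImageTypeSpecifier from the default RGB ColorModel so the transferer can describe its pixel layout to ImageIO. A standalone sketch of the same construction follows; the class name and the PNG writer query are assumptions added here for illustration, not part of XDataTransferer.

import java.awt.image.BufferedImage;
import java.awt.image.ColorModel;
import java.awt.image.WritableRaster;
import java.util.Iterator;
import javax.imageio.ImageIO;
import javax.imageio.ImageTypeSpecifier;
import javax.imageio.ImageWriter;

public class DefaultTypeSpecifierDemo {
    public static void main(String[] args) {
        // Same construction as getDefaultImageTypeSpecifier(): the default
        // packed ARGB ColorModel plus a small compatible raster is enough
        // to describe the pixel layout.
        ColorModel model = ColorModel.getRGBdefault();
        WritableRaster raster = model.createCompatibleWritableRaster(10, 10);
        BufferedImage image =
                new BufferedImage(model, raster, model.isAlphaPremultiplied(), null);
        ImageTypeSpecifier specifier = new ImageTypeSpecifier(image);

        // Typical use of such a specifier: ask ImageIO which writers can
        // encode this pixel layout in a given format.
        Iterator<ImageWriter> writers = ImageIO.getImageWriters(specifier, "png");
        System.out.println("PNG writer available: " + writers.hasNext());
    }
}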
Use of java.awt.image.ColorModel in project jdk8u_jdk by JetBrains.
Class WPathGraphics, method drawImageToPlatform().
/**
* The various <code>drawImage()</code> methods for
* <code>WPathGraphics</code> are all decomposed
* into an invocation of <code>drawImageToPlatform</code>.
* The portion of the passed in image defined by
* <code>srcX, srcY, srcWidth, and srcHeight</code>
* is transformed by the supplied AffineTransform and
* drawn using GDI to the printer context.
*
@param image The image to be drawn.
* @param xform Used to transform the image before drawing.
* This can be null.
* @param bgcolor This color is drawn where the image has transparent
* pixels. If this parameter is null then the
* pixels already in the destination should show
* through.
* @param srcX With srcY this defines the upper-left corner
* of the portion of the image to be drawn.
*
* @param srcY With srcX this defines the upper-left corner
* of the portion of the image to be drawn.
* @param srcWidth The width of the portion of the image to
* be drawn.
* @param srcHeight The height of the portion of the image to
* be drawn.
* @param handlingTransparency if being recursively called to
* print opaque region of transparent image
*/
@Override
protected boolean drawImageToPlatform(Image image, AffineTransform xform, Color bgcolor, int srcX, int srcY, int srcWidth, int srcHeight, boolean handlingTransparency) {
BufferedImage img = getBufferedImage(image);
if (img == null) {
return true;
}
WPrinterJob wPrinterJob = (WPrinterJob) getPrinterJob();
/* The full transform to be applied to the image is the
* caller's transform concatenated on to the transform
* from user space to device space. If the caller didn't
* supply a transform then we just act as if they passed
* in the identity transform.
*/
AffineTransform fullTransform = getTransform();
if (xform == null) {
xform = new AffineTransform();
}
fullTransform.concatenate(xform);
/* Split the full transform into a pair of
* transforms. The first transform holds effects
* that GDI (under Win95) cannot perform, such
* as rotation and shearing. The second transform
* is setup to hold only the scaling effects.
* These transforms are created such that a point,
* p, in user space, when transformed by 'fullTransform'
* lands in the same place as when it is transformed
* by 'rotTransform' and then 'scaleTransform'.
*
* The entire image transformation is not done in Java in order
* to minimize the amount of memory needed in the VM. By
* dividing the transform in two, we rotate and shear
* the source image in its own space and only go to
* the (usually larger) device space when we ask
* GDI to perform the final scaling.
* Clamp this to the device scale for better quality printing.
*/
double[] fullMatrix = new double[6];
fullTransform.getMatrix(fullMatrix);
/* Calculate the amount of scaling in the x
* and y directions. This scaling is computed by
* transforming a unit vector along each axis
* and computing the resulting magnitude.
* The computed values 'scaleX' and 'scaleY'
* represent the amount of scaling GDI will be asked
* to perform.
*/
Point2D.Float unitVectorX = new Point2D.Float(1, 0);
Point2D.Float unitVectorY = new Point2D.Float(0, 1);
fullTransform.deltaTransform(unitVectorX, unitVectorX);
fullTransform.deltaTransform(unitVectorY, unitVectorY);
Point2D.Float origin = new Point2D.Float(0, 0);
double scaleX = unitVectorX.distance(origin);
double scaleY = unitVectorY.distance(origin);
double devResX = wPrinterJob.getXRes();
double devResY = wPrinterJob.getYRes();
double devScaleX = devResX / DEFAULT_USER_RES;
double devScaleY = devResY / DEFAULT_USER_RES;
/* check if rotated or sheared */
int transformType = fullTransform.getType();
boolean clampScale = ((transformType & (AffineTransform.TYPE_GENERAL_ROTATION | AffineTransform.TYPE_GENERAL_TRANSFORM)) != 0);
if (clampScale) {
if (scaleX > devScaleX)
scaleX = devScaleX;
if (scaleY > devScaleY)
scaleY = devScaleY;
}
/* We do not need to draw anything if either scaling
* factor is zero.
*/
if (scaleX != 0 && scaleY != 0) {
/* Here's the transformation we will do with Java2D,
*/
AffineTransform rotTransform = new AffineTransform(
fullMatrix[0] / scaleX, //m00
fullMatrix[1] / scaleY, //m10
fullMatrix[2] / scaleX, //m01
fullMatrix[3] / scaleY, //m11
fullMatrix[4] / scaleX, //m02
fullMatrix[5] / scaleY); //m12
/* The scale transform is not used directly: we instead
* directly multiply by scaleX and scaleY.
*
* Conceptually here is what the scaleTransform is:
*
* AffineTransform scaleTransform = new AffineTransform(
* scaleX, //m00
* 0, //m10
* 0, //m01
* scaleY, //m11
* 0, //m02
* 0); //m12
*/
/* Convert the image source's rectangle into the rotated
* and sheared space. Once there, we calculate a rectangle
* that encloses the resulting shape. It is this rectangle
* which defines the size of the BufferedImage we need to
* create to hold the transformed image.
*/
Rectangle2D.Float srcRect = new Rectangle2D.Float(srcX, srcY, srcWidth, srcHeight);
Shape rotShape = rotTransform.createTransformedShape(srcRect);
Rectangle2D rotBounds = rotShape.getBounds2D();
/* add a fudge factor as some fp precision problems have
* been observed which caused pixels to be rounded down and
* out of the image.
*/
rotBounds.setRect(rotBounds.getX(), rotBounds.getY(), rotBounds.getWidth() + 0.001, rotBounds.getHeight() + 0.001);
int boundsWidth = (int) rotBounds.getWidth();
int boundsHeight = (int) rotBounds.getHeight();
if (boundsWidth > 0 && boundsHeight > 0) {
/* If the image has transparent or semi-transparent
* pixels then we'll have the application re-render
* the portion of the page covered by the image.
* The BufferedImage will be at the image's resolution
* to avoid wasting memory. By re-rendering this portion
* of a page all compositing is done by Java2D into
* the BufferedImage and then that image is copied to
* GDI.
* However several special cases can be handled otherwise:
* - bitmask transparency with a solid background colour
* - images which have transparency color models but no
* transparent pixels
* - images with bitmask transparency and an IndexColorModel
* (the common transparent GIF case) can be handled by
* rendering just the opaque pixels.
*/
boolean drawOpaque = true;
if (!handlingTransparency && hasTransparentPixels(img)) {
drawOpaque = false;
if (isBitmaskTransparency(img)) {
if (bgcolor == null) {
if (drawBitmaskImage(img, xform, bgcolor, srcX, srcY, srcWidth, srcHeight)) {
// image drawn, just return.
return true;
}
} else if (bgcolor.getTransparency() == Transparency.OPAQUE) {
drawOpaque = true;
}
}
if (!canDoRedraws()) {
drawOpaque = true;
}
} else {
// if there are no transparent pixels there's no need
// for a background colour. This can avoid edge artifacts
// in rotation cases.
bgcolor = null;
}
// If the src region extends beyond the image, the "opaque" path
// may blit the b/g colour (including white) where it shouldn't.
if ((srcX + srcWidth > img.getWidth(null) || srcY + srcHeight > img.getHeight(null)) && canDoRedraws()) {
drawOpaque = false;
}
if (drawOpaque == false) {
fullTransform.getMatrix(fullMatrix);
AffineTransform tx = new AffineTransform(
fullMatrix[0] / devScaleX, //m00
fullMatrix[1] / devScaleY, //m10
fullMatrix[2] / devScaleX, //m01
fullMatrix[3] / devScaleY, //m11
fullMatrix[4] / devScaleX, //m02
fullMatrix[5] / devScaleY); //m12
Rectangle2D.Float rect = new Rectangle2D.Float(srcX, srcY, srcWidth, srcHeight);
Shape shape = fullTransform.createTransformedShape(rect);
// Region isn't user space because it's potentially
// been rotated for landscape.
Rectangle2D region = shape.getBounds2D();
region.setRect(region.getX(), region.getY(), region.getWidth() + 0.001, region.getHeight() + 0.001);
// Try to limit the amount of memory used to 8 MB, so
// if at device resolution this exceeds a certain
// image size then scale down the region to fit in
// that memory, but never to less than 72 dpi.
int w = (int) region.getWidth();
int h = (int) region.getHeight();
int nbytes = w * h * 3;
int maxBytes = 8 * 1024 * 1024;
double origDpi = (devResX < devResY) ? devResX : devResY;
int dpi = (int) origDpi;
double scaleFactor = 1;
double maxSFX = w / (double) boundsWidth;
double maxSFY = h / (double) boundsHeight;
double maxSF = (maxSFX > maxSFY) ? maxSFY : maxSFX;
int minDpi = (int) (dpi / maxSF);
if (minDpi < DEFAULT_USER_RES)
minDpi = DEFAULT_USER_RES;
while (nbytes > maxBytes && dpi > minDpi) {
scaleFactor *= 2;
dpi /= 2;
nbytes /= 4;
}
if (dpi < minDpi) {
scaleFactor = (origDpi / minDpi);
}
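/* Worked example of the scale-down loop above (all numbers invented,
* not taken from the JDK source): suppose the transformed region is
* 4000x3000 device pixels, so nbytes = 4000*3000*3 = 36,000,000, with
* maxBytes = 8*1024*1024 = 8,388,608, dpi = 600 and minDpi = 72.
* Pass 1: scaleFactor = 2, dpi = 300, nbytes = 9,000,000 (still too big).
* Pass 2: scaleFactor = 4, dpi = 150, nbytes = 2,250,000 (fits).
* The region is then divided by scaleFactor, i.e. the application redraws
* it at a quarter of the device resolution, and the result is scaled back
* up to device resolution when drawn.
*/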
region.setRect(region.getX() / scaleFactor, region.getY() / scaleFactor, region.getWidth() / scaleFactor, region.getHeight() / scaleFactor);
/*
* We need to have the clip as part of the saved state,
* either directly, or all the components that are
* needed to reconstitute it (image source area,
* image transform and current graphics transform).
* The clip is described in user space, so we need to
* save the current graphics transform anyway so just
* save these two.
*/
wPrinterJob.saveState(getTransform(), getClip(), region, scaleFactor, scaleFactor);
return true;
/* The image can be rendered directly by GDI so we
* copy it into a BufferedImage (this takes care of
* ColorSpace and BufferedImageOp issues) and then
* send that to GDI.
*/
} else {
/* Create a buffered image big enough to hold the portion
* of the source image being printed.
* The image format will be 3BYTE_BGR for most cases
* except where we can represent the image as a 1, 4 or 8
* bits-per-pixel DIB.
*/
int dibType = BufferedImage.TYPE_3BYTE_BGR;
IndexColorModel icm = null;
ColorModel cm = img.getColorModel();
int imgType = img.getType();
if (cm instanceof IndexColorModel && cm.getPixelSize() <= 8 && (imgType == BufferedImage.TYPE_BYTE_BINARY || imgType == BufferedImage.TYPE_BYTE_INDEXED)) {
icm = (IndexColorModel) cm;
dibType = imgType;
/* BYTE_BINARY may be 2 bpp which DIB can't handle.
* Convert this to 4bpp.
*/
if (imgType == BufferedImage.TYPE_BYTE_BINARY && cm.getPixelSize() == 2) {
int[] rgbs = new int[16];
icm.getRGBs(rgbs);
boolean transparent = icm.getTransparency() != Transparency.OPAQUE;
int transpixel = icm.getTransparentPixel();
icm = new IndexColorModel(4, 16, rgbs, 0, transparent, transpixel, DataBuffer.TYPE_BYTE);
}
}
int iw = (int) rotBounds.getWidth();
int ih = (int) rotBounds.getHeight();
BufferedImage deepImage = null;
/* If there is no special transform needed (this is a
* simple BLIT) and dibType == img.getType() and we
* didn't create a new IndexColorModel AND the whole of
* the source image is being drawn (GDI can't handle a
* portion of the original source image) then we
* don't need to create this intermediate image - GDI
* can access the data from the original image.
* Since a subimage can be created by calling
* BufferedImage.getSubimage() that condition needs to
* be accounted for too. This implies inspecting the
* data buffer. In the end too many cases are not able
* to take advantage of this option until we can teach
* the native code to properly navigate the data buffer.
* There was a concern that, since in native code we
* need to DWORD align and flip to a bottom-up DIB,
* the "original" image may get perturbed by this.
* But in fact we always malloc new memory for the aligned
* copy so this isn't a problem.
* This points out that we allocate two temporary copies
* of the image: one in Java and one in native. If
* we can be smarter about not allocating this one when
* not needed, that would seem like a good thing to do,
* even if in many cases the ColorModels don't match and
* it's needed.
* Until all of this is resolved newImage is always true.
*/
boolean newImage = true;
if (newImage) {
if (icm == null) {
deepImage = new BufferedImage(iw, ih, dibType);
} else {
deepImage = new BufferedImage(iw, ih, dibType, icm);
}
/* Setup a Graphics2D on to the BufferedImage so that
* the source image when copied, lands within the
* image buffer.
*/
Graphics2D imageGraphics = deepImage.createGraphics();
imageGraphics.clipRect(0, 0, deepImage.getWidth(), deepImage.getHeight());
imageGraphics.translate(-rotBounds.getX(), -rotBounds.getY());
imageGraphics.transform(rotTransform);
/* Fill the BufferedImage either with the caller-supplied
* color, 'bgcolor', or, if null, with white.
*/
if (bgcolor == null) {
bgcolor = Color.white;
}
imageGraphics.drawImage(img, srcX, srcY, srcX + srcWidth, srcY + srcHeight, srcX, srcY, srcX + srcWidth, srcY + srcHeight, bgcolor, null);
imageGraphics.dispose();
} else {
deepImage = img;
}
/* Scale the bounding rectangle by the scale transform.
* Because the scaling transform has only x and y
* scaling components it is equivalent to multiply
* the x components of the bounding rectangle by
* the x scaling factor and to multiply the y components
* by the y scaling factor.
*/
Rectangle2D.Float scaledBounds = new Rectangle2D.Float((float) (rotBounds.getX() * scaleX), (float) (rotBounds.getY() * scaleY), (float) (rotBounds.getWidth() * scaleX), (float) (rotBounds.getHeight() * scaleY));
/* Pull the raster data from the buffered image
* and pass it along to GDI.
*/
WritableRaster raster = deepImage.getRaster();
byte[] data;
if (raster instanceof ByteComponentRaster) {
data = ((ByteComponentRaster) raster).getDataStorage();
} else if (raster instanceof BytePackedRaster) {
data = ((BytePackedRaster) raster).getDataStorage();
} else {
return false;
}
int bitsPerPixel = 24;
SampleModel sm = deepImage.getSampleModel();
if (sm instanceof ComponentSampleModel) {
ComponentSampleModel csm = (ComponentSampleModel) sm;
bitsPerPixel = csm.getPixelStride() * 8;
} else if (sm instanceof MultiPixelPackedSampleModel) {
MultiPixelPackedSampleModel mppsm = (MultiPixelPackedSampleModel) sm;
bitsPerPixel = mppsm.getPixelBitStride();
} else {
if (icm != null) {
int diw = deepImage.getWidth();
int dih = deepImage.getHeight();
if (diw > 0 && dih > 0) {
bitsPerPixel = data.length * 8 / diw / dih;
}
}
}
/* Because the caller's image has been rotated
* and sheared into our BufferedImage and because
* we will be handing that BufferedImage directly to
* GDI, we need to set an additional clip. This clip
* makes sure that only parts of the BufferedImage
* that are also part of the caller's image are drawn.
*/
Shape holdClip = getClip();
clip(xform.createTransformedShape(srcRect));
deviceClip(getClip().getPathIterator(getTransform()));
wPrinterJob.drawDIBImage(data, scaledBounds.x, scaledBounds.y, (float) Math.rint(scaledBounds.width + 0.5), (float) Math.rint(scaledBounds.height + 0.5), 0f, 0f, deepImage.getWidth(), deepImage.getHeight(), bitsPerPixel, icm);
setClip(holdClip);
}
}
}
return true;
}
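The central trick in drawImageToPlatform() is splitting the full user-to-device transform into a rotation/shear part (applied by Java2D) and a pure scale part (left to GDI), with the scale factors recovered by delta-transforming the unit vectors. The sketch below shows that decomposition in isolation; the class name and the sample transform are made up for illustration and are not part of WPathGraphics.

import java.awt.geom.AffineTransform;
import java.awt.geom.Point2D;

public class TransformSplitDemo {
    public static void main(String[] args) {
        // A transform combining rotation and non-uniform scale, as a caller might supply.
        AffineTransform full = new AffineTransform();
        full.rotate(Math.toRadians(30));
        full.scale(3.0, 2.0);

        // Recover the overall scale the same way drawImageToPlatform() does:
        // delta-transform the unit vectors and measure their lengths.
        Point2D.Float ux = new Point2D.Float(1, 0);
        Point2D.Float uy = new Point2D.Float(0, 1);
        full.deltaTransform(ux, ux);
        full.deltaTransform(uy, uy);
        double scaleX = ux.distance(0, 0);
        double scaleY = uy.distance(0, 0);

        // Divide the scale back out; what remains is the rotation/shear part
        // that Java2D applies before the (smaller) image is handed to GDI.
        double[] m = new double[6];
        full.getMatrix(m);                       // {m00, m10, m01, m11, m02, m12}
        AffineTransform rot = new AffineTransform(
                m[0] / scaleX, m[1] / scaleY,    // m00, m10
                m[2] / scaleX, m[3] / scaleY,    // m01, m11
                m[4] / scaleX, m[5] / scaleY);   // m02, m12
        AffineTransform scale = AffineTransform.getScaleInstance(scaleX, scaleY);

        // Applying rot first and then scale reproduces the original transform
        // (up to floating-point rounding).
        AffineTransform recomposed = new AffineTransform(scale);
        recomposed.concatenate(rot);
        System.out.println("original:   " + full);
        System.out.println("recomposed: " + recomposed);
    }
}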
Use of java.awt.image.ColorModel in project jdk8u_jdk by JetBrains.
Class PathGraphics, method drawBitmaskImage().
/* An optimisation for the special case of ICM images which have
* bitmask transparency.
*/
protected boolean drawBitmaskImage(BufferedImage bufferedImage, AffineTransform xform, Color bgcolor, int srcX, int srcY, int srcWidth, int srcHeight) {
ColorModel colorModel = bufferedImage.getColorModel();
IndexColorModel icm;
int[] pixels;
if (!(colorModel instanceof IndexColorModel)) {
return false;
} else {
icm = (IndexColorModel) colorModel;
}
if (colorModel.getTransparency() != ColorModel.BITMASK) {
return false;
}
// Treat a background colour with alpha >= 128 as opaque; anything
// more transparent falls back to the general path.
if (bgcolor != null && bgcolor.getAlpha() < 128) {
return false;
}
if ((xform.getType() & ~(AffineTransform.TYPE_UNIFORM_SCALE | AffineTransform.TYPE_TRANSLATION | AffineTransform.TYPE_QUADRANT_ROTATION)) != 0) {
return false;
}
if ((getTransform().getType() & ~(AffineTransform.TYPE_UNIFORM_SCALE | AffineTransform.TYPE_TRANSLATION | AffineTransform.TYPE_QUADRANT_ROTATION)) != 0) {
return false;
}
BufferedImage subImage = null;
Raster raster = bufferedImage.getRaster();
int transpixel = icm.getTransparentPixel();
byte[] alphas = new byte[icm.getMapSize()];
icm.getAlphas(alphas);
if (transpixel >= 0) {
alphas[transpixel] = 0;
}
/* don't just use srcWidth & srcHeight from application - they
* may exceed the extent of the image - may need to clip.
* The image xform will ensure that points are still mapped properly.
*/
int rw = raster.getWidth();
int rh = raster.getHeight();
if (srcX > rw || srcY > rh) {
return false;
}
int right, bottom, wid, hgt;
if (srcX + srcWidth > rw) {
right = rw;
wid = right - srcX;
} else {
right = srcX + srcWidth;
wid = srcWidth;
}
if (srcY + srcHeight > rh) {
bottom = rh;
hgt = bottom - srcY;
} else {
bottom = srcY + srcHeight;
hgt = srcHeight;
}
pixels = new int[wid];
for (int j = srcY; j < bottom; j++) {
int startx = -1;
raster.getPixels(srcX, j, wid, 1, pixels);
for (int i = srcX; i < right; i++) {
if (alphas[pixels[i - srcX]] == 0) {
if (startx >= 0) {
subImage = bufferedImage.getSubimage(startx, j, i - startx, 1);
xform.translate(startx, j);
drawImageToPlatform(subImage, xform, bgcolor, 0, 0, i - startx, 1, true);
xform.translate(-startx, -j);
startx = -1;
}
} else if (startx < 0) {
startx = i;
}
}
if (startx >= 0) {
subImage = bufferedImage.getSubimage(startx, j, right - startx, 1);
xform.translate(startx, j);
drawImageToPlatform(subImage, xform, bgcolor, 0, 0, right - startx, 1, true);
xform.translate(-startx, -j);
}
}
return true;
}
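drawBitmaskImage() scans each row, converts the indexed pixel values to alphas via the IndexColorModel, and hands only the opaque runs back to drawImageToPlatform(). The row-scanning idea can be shown on its own; the sketch below builds a tiny bitmask-transparent indexed image and reports its opaque runs. The class name, palette and pixel values are invented for the example.

import java.awt.Transparency;
import java.awt.image.BufferedImage;
import java.awt.image.IndexColorModel;
import java.awt.image.Raster;

public class OpaqueRunDemo {
    public static void main(String[] args) {
        // A 2-entry palette with bitmask transparency: index 0 is fully
        // transparent, index 1 is opaque black.
        byte[] r = {0, 0}, g = {0, 0}, b = {0, 0};
        IndexColorModel icm = new IndexColorModel(1, 2, r, g, b, 0 /* transparent pixel */);
        BufferedImage img = new BufferedImage(8, 1, BufferedImage.TYPE_BYTE_BINARY, icm);
        img.getRaster().setSample(2, 0, 0, 1);
        img.getRaster().setSample(3, 0, 0, 1);
        img.getRaster().setSample(6, 0, 0, 1);

        System.out.println("bitmask? " + (icm.getTransparency() == Transparency.BITMASK));

        // Same scan as drawBitmaskImage: translate pixel values to alphas and
        // report each horizontal run of opaque pixels.
        byte[] alphas = new byte[icm.getMapSize()];
        icm.getAlphas(alphas);
        Raster raster = img.getRaster();
        int[] pixels = new int[img.getWidth()];
        raster.getPixels(0, 0, img.getWidth(), 1, pixels);
        int start = -1;
        for (int i = 0; i <= pixels.length; i++) {
            boolean opaque = i < pixels.length && alphas[pixels[i]] != 0;
            if (opaque && start < 0) {
                start = i;
            } else if (!opaque && start >= 0) {
                System.out.println("opaque run: [" + start + ", " + (i - 1) + "]");
                start = -1;
            }
        }
    }
}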
Use of java.awt.image.ColorModel in project jdk8u_jdk by JetBrains.
Class GLXVolatileSurfaceManager, method initAcceleratedSurface().
/**
* Create a pbuffer-based SurfaceData object (or init the backbuffer
* of an existing window if this is a double buffered GraphicsConfig)
*/
protected SurfaceData initAcceleratedSurface() {
SurfaceData sData;
Component comp = vImg.getComponent();
X11ComponentPeer peer = (comp != null) ? (X11ComponentPeer) comp.getPeer() : null;
try {
boolean createVSynced = false;
boolean forceback = false;
if (context instanceof Boolean) {
forceback = ((Boolean) context).booleanValue();
if (forceback && peer instanceof BackBufferCapsProvider) {
BackBufferCapsProvider provider = (BackBufferCapsProvider) peer;
BufferCapabilities caps = provider.getBackBufferCaps();
if (caps instanceof ExtendedBufferCapabilities) {
ExtendedBufferCapabilities ebc = (ExtendedBufferCapabilities) caps;
if (ebc.getVSync() == VSYNC_ON && ebc.getFlipContents() == COPIED) {
createVSynced = true;
forceback = false;
}
}
}
}
if (forceback) {
// peer must be non-null in this case
sData = GLXSurfaceData.createData(peer, vImg, FLIP_BACKBUFFER);
} else {
GLXGraphicsConfig gc = (GLXGraphicsConfig) vImg.getGraphicsConfig();
ColorModel cm = gc.getColorModel(vImg.getTransparency());
int type = vImg.getForcedAccelSurfaceType();
// use the forced type, otherwise choose one based on caps
if (type == OGLSurfaceData.UNDEFINED) {
type = gc.isCapPresent(CAPS_EXT_FBOBJECT) ? OGLSurfaceData.FBOBJECT : OGLSurfaceData.PBUFFER;
}
if (createVSynced) {
sData = GLXSurfaceData.createData(peer, vImg, type);
} else {
sData = GLXSurfaceData.createData(gc, vImg.getWidth(), vImg.getHeight(), cm, vImg, type);
}
}
} catch (NullPointerException ex) {
sData = null;
} catch (OutOfMemoryError er) {
sData = null;
}
return sData;
}
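Here the ColorModel is not taken from an image at all: it is requested from the GraphicsConfiguration, matched to the VolatileImage's transparency, before being passed to GLXSurfaceData.createData(). The same query can be made against the default screen configuration; a small sketch follows (requires a display, and the class name is illustrative).

import java.awt.GraphicsConfiguration;
import java.awt.GraphicsEnvironment;
import java.awt.Transparency;
import java.awt.image.ColorModel;

public class ConfigColorModelDemo {
    public static void main(String[] args) {
        // Requires a display; in a headless JVM getDefaultScreenDevice() throws.
        GraphicsConfiguration gc = GraphicsEnvironment
                .getLocalGraphicsEnvironment()
                .getDefaultScreenDevice()
                .getDefaultConfiguration();

        // Same call the surface manager makes: ask the configuration for a
        // ColorModel matching the image's transparency.
        ColorModel opaque = gc.getColorModel(Transparency.OPAQUE);
        ColorModel translucent = gc.getColorModel(Transparency.TRANSLUCENT);
        System.out.println("opaque:      " + opaque.getPixelSize() + " bpp");
        System.out.println("translucent: " + translucent.getPixelSize() + " bpp");
    }
}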