use of java.awt.image.IndexColorModel in project jdk8u_jdk by JetBrains.
the class DrawImage method getTransformColorModel.
/*
 * Return the color model to be used with this BufferedImage and
 * transform. The source image's own model is reused when possible;
 * otherwise a model that can represent transparent pixels (an extended
 * IndexColorModel or the default ARGB model) is substituted so that the
 * edges produced by the transform can be rendered transparent.
 */
private ColorModel getTransformColorModel(SunGraphics2D sg, BufferedImage bImg, AffineTransform tx) {
ColorModel cm = bImg.getColorModel();
ColorModel dstCM = cm;
if (tx.isIdentity()) {
// No transform: the source's own model is always usable as-is.
return dstCM;
}
int type = tx.getType();
// Rotation or a general transform needs a model that can represent
// transparent pixels for the areas the transformed image leaves uncovered.
boolean needTrans = ((type & (AffineTransform.TYPE_MASK_ROTATION | AffineTransform.TYPE_GENERAL_TRANSFORM)) != 0);
if (!needTrans && type != AffineTransform.TYPE_TRANSLATION && type != AffineTransform.TYPE_IDENTITY) {
double[] mtx = new double[4];
tx.getMatrix(mtx);
// Check out the matrix. A non-integral scale will force ARGB
// since the edge conditions cannot be guaranteed.
needTrans = (mtx[0] != (int) mtx[0] || mtx[3] != (int) mtx[3]);
}
if (sg.renderHint != SunHints.INTVAL_RENDER_QUALITY) {
if (cm instanceof IndexColorModel) {
Raster raster = bImg.getRaster();
IndexColorModel icm = (IndexColorModel) cm;
// Just need to make sure that we have a transparent pixel
if (needTrans && cm.getTransparency() == Transparency.OPAQUE) {
// Fix 4221407
if (raster instanceof sun.awt.image.BytePackedRaster) {
// Packed sub-byte rasters can't take an appended palette
// entry, so fall back to the default ARGB model.
dstCM = ColorModel.getRGBdefault();
} else {
double[] matrix = new double[6];
tx.getMatrix(matrix);
if (matrix[1] == 0. && matrix[2] == 0. && matrix[4] == 0. && matrix[5] == 0.) {
// Only scaling so do not need to create
} else {
int mapSize = icm.getMapSize();
if (mapSize < 256) {
// Copy the palette and append one fully transparent
// (alpha == 0) entry at index 'mapSize', which becomes
// the new model's transparent pixel.
int[] cmap = new int[mapSize + 1];
icm.getRGBs(cmap);
cmap[mapSize] = 0x0000;
dstCM = new IndexColorModel(icm.getPixelSize(), mapSize + 1, cmap, 0, true, mapSize, DataBuffer.TYPE_BYTE);
} else {
// Palette is full (256 entries); no room to add a
// transparent entry, so use the default ARGB model.
dstCM = ColorModel.getRGBdefault();
}
}
/* if (matrix[0] < 1.f ...) */
}
/* raster instanceof sun.awt.image.BytePackedRaster */
}
/* if (cm.getTransparency() == cm.OPAQUE) */
} else /* if (cm instanceof IndexColorModel) */
if (needTrans && cm.getTransparency() == Transparency.OPAQUE) {
// Need a bitmask transparency
// REMIND: for now, use full transparency since no loops
// for bitmask
dstCM = ColorModel.getRGBdefault();
}
} else /* if (sg.renderHint == RENDER_QUALITY) */
{
if (cm instanceof IndexColorModel || (needTrans && cm.getTransparency() == Transparency.OPAQUE)) {
// Need a bitmask transparency
// REMIND: for now, use full transparency since no loops
// for bitmask
dstCM = ColorModel.getRGBdefault();
}
}
return dstCM;
}
use of java.awt.image.IndexColorModel in project AozoraEpub3 by hmdev.
the class ImageUtils method getGray16ColorModel.
/**
 * Returns the shared indexed color model used for 4-bit grayscale
 * rendering: 16 evenly spaced gray levels from 0 to 255. The model is
 * built lazily on first use and cached in GRAY16_COLOR_MODEL.
 */
static ColorModel getGray16ColorModel() {
    if (GRAY16_COLOR_MODEL == null) {
        // Levels 0, 17, 34, ... 255; the narrowing (byte) cast yields the
        // same bit patterns as the original signed-byte literals.
        byte[] grayLevels = new byte[16];
        for (int i = 0; i < grayLevels.length; i++) {
            grayLevels[i] = (byte) (i * 17);
        }
        // Same byte array supplies the red, green and blue components.
        GRAY16_COLOR_MODEL = new IndexColorModel(4, grayLevels.length, grayLevels, grayLevels, grayLevels);
    }
    return GRAY16_COLOR_MODEL;
}
use of java.awt.image.IndexColorModel in project scriptographer by scriptographer.
the class Raster method getColorModel.
/**
 * The Java2D color model of the raster.
 * @jshide
 */
public ColorModel getColorModel() {
    ColorType type = getType();
    // Every component model uses 8 bits per channel; the model is
    // translucent exactly when the raster type carries an alpha channel.
    boolean hasAlpha = type.alpha;
    int transparency = hasAlpha ? Transparency.TRANSLUCENT : Transparency.OPAQUE;
    ColorModel model = null;
    switch (type) {
    case RGB:
    case ARGB: {
        int[] bits = hasAlpha ? new int[] { 8, 8, 8, 8 } : new int[] { 8, 8, 8 };
        model = new ComponentColorModel(RGBColor.getColorSpace(), bits, hasAlpha, false, transparency, DataBuffer.TYPE_BYTE);
        break;
    }
    case CMYK:
    case ACMYK: {
        int[] bits = hasAlpha ? new int[] { 8, 8, 8, 8, 8 } : new int[] { 8, 8, 8, 8 };
        model = new ComponentColorModel(CMYKColor.getColorSpace(), bits, hasAlpha, false, transparency, DataBuffer.TYPE_BYTE);
        break;
    }
    case GRAY:
    case AGRAY: {
        int[] bits = hasAlpha ? new int[] { 8, 8 } : new int[] { 8 };
        model = new ComponentColorModel(GrayColor.getColorSpace(), bits, hasAlpha, false, transparency, DataBuffer.TYPE_BYTE);
        break;
    }
    case BITMAP:
    case ABITMAP: {
        // Two-entry palette of black and white; index 0 (black) is the
        // transparent pixel when the raster has an alpha channel.
        byte[] blackAndWhite = new byte[] { 0, (byte) 255 };
        model = new IndexColorModel(2, 2, blackAndWhite, blackAndWhite, blackAndWhite, hasAlpha ? 0 : -1);
        break;
    }
    }
    return model;
}
use of java.awt.image.IndexColorModel in project cloudstack by apache.
the class ServerPaletteUpdate method handleData.
/**
 * Handles a server Palette Update PDU: validates the header fields and
 * installs the received 256-entry RGB palette as the screen's global
 * color map.
 *
 * NOTE(review): ByteBuffer and Link here are this project's own stream
 * types (with little-endian helpers such as readUnsignedShortLE and
 * reference counting), not java.nio.ByteBuffer — confirm against the
 * surrounding package.
 */
@Override
public void handleData(ByteBuffer buf, Link link) {
if (verbose)
System.out.println("[" + this + "] INFO: Data received: " + buf + ".");
// (2 bytes): A 16-bit, unsigned integer. The update type. This field MUST
// be set to UPDATETYPE_PALETTE (0x0002).
int updateType = buf.readUnsignedShortLE();
if (updateType != UPDATETYPE_PALETTE)
throw new RuntimeException("Unexpected update type. Expected type: UPDATETYPE_PALETTE (0x0002), actual value: " + updateType + ", data: " + buf + ".");
// pad2Octets (2 bytes): A 16-bit, unsigned integer. Padding. Values in this
// field MUST be ignored.
buf.skipBytes(2);
// (4 bytes): A 32-bit, unsigned integer. The number of RGB triplets in the
// paletteData field. This field MUST be set to 256 (the number of entries
// in an 8 bpp palette).
int numberColors = (int) buf.readUnsignedIntLE();
if (numberColors != 256)
throw new RuntimeException("Unexpected value for number of color field in server Palette Update packet. Expected value: 256 colors, actual value: " + numberColors + ", data: " + buf + ".");
// (variable): An array of palette entries in RGB triplet format packed on
// byte boundaries. The number of triplet entries is given by the
// numberColors field.
ByteBuffer paletteEntries = buf.readBytes(numberColors * 3);
// In the case of a Palette Update, the client MUST update the global
// palette on all drawing surfaces.
// Build an 8 bpp opaque palette directly from the packed RGB triplets,
// reading from the buffer's backing array at its current offset.
screen.colorMap = new IndexColorModel(8, numberColors, paletteEntries.data, paletteEntries.offset, false);
/* DEBUG */
buf.assertThatBufferIsFullyRead();
// Release our reference to the buffer now that it is fully consumed.
buf.unref();
}
use of java.awt.image.IndexColorModel in project jdk8u_jdk by JetBrains.
the class WPathGraphics method drawImageToPlatform.
/**
 * The various <code>drawImage()</code> methods for
 * <code>WPathGraphics</code> are all decomposed
 * into an invocation of <code>drawImageToPlatform</code>.
 * The portion of the passed in image defined by
 * <code>srcX, srcY, srcWidth, and srcHeight</code>
 * is transformed by the supplied AffineTransform and
 * drawn using GDI to the printer context.
 *
 * @param image The image to be drawn.
 * @param xform Used to transform the image before drawing.
 * This can be null.
 * @param bgcolor This color is drawn where the image has transparent
 * pixels. If this parameter is null then the
 * pixels already in the destination should show
 * through.
 * @param srcX With srcY this defines the upper-left corner
 * of the portion of the image to be drawn.
 *
 * @param srcY With srcX this defines the upper-left corner
 * of the portion of the image to be drawn.
 * @param srcWidth The width of the portion of the image to
 * be drawn.
 * @param srcHeight The height of the portion of the image to
 * be drawn.
 * @param handlingTransparency if being recursively called to
 * print opaque region of transparent image
 * @return false only when the intermediate image's raster is neither a
 * ByteComponentRaster nor a BytePackedRaster and so cannot be
 * handed to GDI; true in every other case (including "nothing
 * to draw" and the deferred-redraw path).
 */
@Override
protected boolean drawImageToPlatform(Image image, AffineTransform xform, Color bgcolor, int srcX, int srcY, int srcWidth, int srcHeight, boolean handlingTransparency) {
BufferedImage img = getBufferedImage(image);
if (img == null) {
// Nothing to draw; treated as success.
return true;
}
WPrinterJob wPrinterJob = (WPrinterJob) getPrinterJob();
/* The full transform to be applied to the image is the
 * caller's transform concatenated on to the transform
 * from user space to device space. If the caller didn't
 * supply a transform then we just act as if they passed
 * in the identity transform.
 */
AffineTransform fullTransform = getTransform();
if (xform == null) {
xform = new AffineTransform();
}
fullTransform.concatenate(xform);
/* Split the full transform into a pair of
 * transforms. The first transform holds effects
 * that GDI (under Win95) can not perform such
 * as rotation and shearing. The second transform
 * is setup to hold only the scaling effects.
 * These transforms are created such that a point,
 * p, in user space, when transformed by 'fullTransform'
 * lands in the same place as when it is transformed
 * by 'rotTransform' and then 'scaleTransform'.
 *
 * The entire image transformation is not in Java in order
 * to minimize the amount of memory needed in the VM. By
 * dividing the transform in two, we rotate and shear
 * the source image in its own space and only go to
 * the, usually, larger, device space when we ask
 * GDI to perform the final scaling.
 * Clamp this to the device scale for better quality printing.
 */
double[] fullMatrix = new double[6];
fullTransform.getMatrix(fullMatrix);
/* Calculate the amount of scaling in the x
 * and y directions. This scaling is computed by
 * transforming a unit vector along each axis
 * and computing the resulting magnitude.
 * The computed values 'scaleX' and 'scaleY'
 * represent the amount of scaling GDI will be asked
 * to perform.
 */
Point2D.Float unitVectorX = new Point2D.Float(1, 0);
Point2D.Float unitVectorY = new Point2D.Float(0, 1);
fullTransform.deltaTransform(unitVectorX, unitVectorX);
fullTransform.deltaTransform(unitVectorY, unitVectorY);
Point2D.Float origin = new Point2D.Float(0, 0);
double scaleX = unitVectorX.distance(origin);
double scaleY = unitVectorY.distance(origin);
double devResX = wPrinterJob.getXRes();
double devResY = wPrinterJob.getYRes();
double devScaleX = devResX / DEFAULT_USER_RES;
double devScaleY = devResY / DEFAULT_USER_RES;
/* check if rotated or sheared */
int transformType = fullTransform.getType();
boolean clampScale = ((transformType & (AffineTransform.TYPE_GENERAL_ROTATION | AffineTransform.TYPE_GENERAL_TRANSFORM)) != 0);
if (clampScale) {
// Never ask GDI to scale past the device resolution.
if (scaleX > devScaleX)
scaleX = devScaleX;
if (scaleY > devScaleY)
scaleY = devScaleY;
}
/* We do not need to draw anything if either scaling
 * factor is zero.
 */
if (scaleX != 0 && scaleY != 0) {
/* Here's the transformation we will do with Java2D,
 */
AffineTransform rotTransform = new AffineTransform(//m00
fullMatrix[0] / scaleX, //m10
fullMatrix[1] / scaleY, //m01
fullMatrix[2] / scaleX, //m11
fullMatrix[3] / scaleY, //m02
fullMatrix[4] / scaleX, //m12
fullMatrix[5] / scaleY);
/* The scale transform is not used directly: we instead
 * directly multiply by scaleX and scaleY.
 *
 * Conceptually here is what the scaleTransform is:
 *
 * AffineTransform scaleTransform = new AffineTransform(
 * scaleX, //m00
 * 0, //m10
 * 0, //m01
 * scaleY, //m11
 * 0, //m02
 * 0); //m12
 */
/* Convert the image source's rectangle into the rotated
 * and sheared space. Once there, we calculate a rectangle
 * that encloses the resulting shape. It is this rectangle
 * which defines the size of the BufferedImage we need to
 * create to hold the transformed image.
 */
Rectangle2D.Float srcRect = new Rectangle2D.Float(srcX, srcY, srcWidth, srcHeight);
Shape rotShape = rotTransform.createTransformedShape(srcRect);
Rectangle2D rotBounds = rotShape.getBounds2D();
/* add a fudge factor as some fp precision problems have
 * been observed which caused pixels to be rounded down and
 * out of the image.
 */
rotBounds.setRect(rotBounds.getX(), rotBounds.getY(), rotBounds.getWidth() + 0.001, rotBounds.getHeight() + 0.001);
int boundsWidth = (int) rotBounds.getWidth();
int boundsHeight = (int) rotBounds.getHeight();
if (boundsWidth > 0 && boundsHeight > 0) {
/* If the image has transparent or semi-transparent
 * pixels then we'll have the application re-render
 * the portion of the page covered by the image.
 * The BufferedImage will be at the image's resolution
 * to avoid wasting memory. By re-rendering this portion
 * of a page all compositing is done by Java2D into
 * the BufferedImage and then that image is copied to
 * GDI.
 * However several special cases can be handled otherwise:
 * - bitmask transparency with a solid background colour
 * - images which have transparency color models but no
 * transparent pixels
 * - images with bitmask transparency and an IndexColorModel
 * (the common transparent GIF case) can be handled by
 * rendering just the opaque pixels.
 */
boolean drawOpaque = true;
if (!handlingTransparency && hasTransparentPixels(img)) {
drawOpaque = false;
if (isBitmaskTransparency(img)) {
if (bgcolor == null) {
// NOTE(review): drawBitmaskImage is a superclass helper;
// presumably it renders only the opaque pixels — confirm.
if (drawBitmaskImage(img, xform, bgcolor, srcX, srcY, srcWidth, srcHeight)) {
// image drawn, just return.
return true;
}
} else if (bgcolor.getTransparency() == Transparency.OPAQUE) {
// Solid background: compositing onto it yields an
// opaque result GDI can handle directly.
drawOpaque = true;
}
}
if (!canDoRedraws()) {
// Without application redraw support we must draw
// opaquely even if quality suffers.
drawOpaque = true;
}
} else {
// if there's no transparent pixels there's no need
// for a background colour. This can avoid edge artifacts
// in rotation cases.
bgcolor = null;
}
// If the source rect extends past the image bounds GDI
// may blit b/g colour (including white) where it shouldn't,
// so prefer the redraw path when it is available.
if ((srcX + srcWidth > img.getWidth(null) || srcY + srcHeight > img.getHeight(null)) && canDoRedraws()) {
drawOpaque = false;
}
if (drawOpaque == false) {
// Deferred-redraw path: record the region and scale so the
// application can re-render it at print time.
fullTransform.getMatrix(fullMatrix);
AffineTransform tx = new AffineTransform(//m00
fullMatrix[0] / devScaleX, //m10
fullMatrix[1] / devScaleY, //m01
fullMatrix[2] / devScaleX, //m11
fullMatrix[3] / devScaleY, //m02
fullMatrix[4] / devScaleX, //m12
fullMatrix[5] / devScaleY);
Rectangle2D.Float rect = new Rectangle2D.Float(srcX, srcY, srcWidth, srcHeight);
Shape shape = fullTransform.createTransformedShape(rect);
// Region isn't user space because its potentially
// been rotated for landscape.
Rectangle2D region = shape.getBounds2D();
region.setRect(region.getX(), region.getY(), region.getWidth() + 0.001, region.getHeight() + 0.001);
// Try to limit the amount of memory used to 8Mb, so
// if at device resolution this exceeds a certain
// image size then scale down the region to fit in
// that memory, but never to less than 72 dpi.
int w = (int) region.getWidth();
int h = (int) region.getHeight();
// NOTE(review): w * h * 3 is computed in int arithmetic and could
// overflow for extremely large regions — confirm upstream limits.
int nbytes = w * h * 3;
int maxBytes = 8 * 1024 * 1024;
double origDpi = (devResX < devResY) ? devResX : devResY;
int dpi = (int) origDpi;
double scaleFactor = 1;
double maxSFX = w / (double) boundsWidth;
double maxSFY = h / (double) boundsHeight;
double maxSF = (maxSFX > maxSFY) ? maxSFY : maxSFX;
int minDpi = (int) (dpi / maxSF);
if (minDpi < DEFAULT_USER_RES)
minDpi = DEFAULT_USER_RES;
// Halve the dpi (quartering the memory) until under budget
// or the minimum dpi is reached.
while (nbytes > maxBytes && dpi > minDpi) {
scaleFactor *= 2;
dpi /= 2;
nbytes /= 4;
}
if (dpi < minDpi) {
scaleFactor = (origDpi / minDpi);
}
region.setRect(region.getX() / scaleFactor, region.getY() / scaleFactor, region.getWidth() / scaleFactor, region.getHeight() / scaleFactor);
/*
 * We need to have the clip as part of the saved state,
 * either directly, or all the components that are
 * needed to reconstitute it (image source area,
 * image transform and current graphics transform).
 * The clip is described in user space, so we need to
 * save the current graphics transform anyway so just
 * save these two.
 */
wPrinterJob.saveState(getTransform(), getClip(), region, scaleFactor, scaleFactor);
return true;
/* The image can be rendered directly by GDI so we
 * copy it into a BufferedImage (this takes care of
 * ColorSpace and BufferedImageOp issues) and then
 * send that to GDI.
 */
} else {
/* Create a buffered image big enough to hold the portion
 * of the source image being printed.
 * The image format will be 3BYTE_BGR for most cases
 * except where we can represent the image as a 1, 4 or 8
 * bits-per-pixel DIB.
 */
int dibType = BufferedImage.TYPE_3BYTE_BGR;
IndexColorModel icm = null;
ColorModel cm = img.getColorModel();
int imgType = img.getType();
if (cm instanceof IndexColorModel && cm.getPixelSize() <= 8 && (imgType == BufferedImage.TYPE_BYTE_BINARY || imgType == BufferedImage.TYPE_BYTE_INDEXED)) {
icm = (IndexColorModel) cm;
dibType = imgType;
/* BYTE_BINARY may be 2 bpp which DIB can't handle.
 * Convert this to 4bpp.
 */
if (imgType == BufferedImage.TYPE_BYTE_BINARY && cm.getPixelSize() == 2) {
// Widen the 4-entry palette into a 16-entry 4 bpp
// palette, preserving the transparency settings.
int[] rgbs = new int[16];
icm.getRGBs(rgbs);
boolean transparent = icm.getTransparency() != Transparency.OPAQUE;
int transpixel = icm.getTransparentPixel();
icm = new IndexColorModel(4, 16, rgbs, 0, transparent, transpixel, DataBuffer.TYPE_BYTE);
}
}
int iw = (int) rotBounds.getWidth();
int ih = (int) rotBounds.getHeight();
BufferedImage deepImage = null;
/* If there is no special transform needed (this is a
 * simple BLIT) and dibType == img.getType() and we
 * didn't create a new IndexColorModel AND the whole of
 * the source image is being drawn (GDI can't handle a
 * portion of the original source image) then we
 * don't need to create this intermediate image - GDI
 * can access the data from the original image.
 * Since a subimage can be created by calling
 * BufferedImage.getSubImage() that condition needs to
 * be accounted for too. This implies inspecting the
 * data buffer. In the end too many cases are not able
 * to take advantage of this option until we can teach
 * the native code to properly navigate the data buffer.
 * There was a concern that since in native code since we
 * need to DWORD align and flip to a bottom up DIB that
 * the "original" image may get perturbed by this.
 * But in fact we always malloc new memory for the aligned
 * copy so this isn't a problem.
 * This points out that we allocate two temporaries copies
 * of the image : one in Java and one in native. If
 * we can be smarter about not allocating this one when
 * not needed, that would seem like a good thing to do,
 * even if in many cases the ColorModels don't match and
 * its needed.
 * Until all of this is resolved newImage is always true.
 */
boolean newImage = true;
if (newImage) {
if (icm == null) {
deepImage = new BufferedImage(iw, ih, dibType);
} else {
deepImage = new BufferedImage(iw, ih, dibType, icm);
}
/* Setup a Graphics2D on to the BufferedImage so that
 * the source image when copied, lands within the
 * image buffer.
 */
Graphics2D imageGraphics = deepImage.createGraphics();
imageGraphics.clipRect(0, 0, deepImage.getWidth(), deepImage.getHeight());
imageGraphics.translate(-rotBounds.getX(), -rotBounds.getY());
imageGraphics.transform(rotTransform);
/* Fill the BufferedImage either with the caller
 * supplied color, 'bgColor' or, if null, with white.
 */
if (bgcolor == null) {
bgcolor = Color.white;
}
imageGraphics.drawImage(img, srcX, srcY, srcX + srcWidth, srcY + srcHeight, srcX, srcY, srcX + srcWidth, srcY + srcHeight, bgcolor, null);
imageGraphics.dispose();
} else {
// Dead branch while newImage is hard-wired true (see above).
deepImage = img;
}
/* Scale the bounding rectangle by the scale transform.
 * Because the scaling transform has only x and y
 * scaling components it is equivalent to multiply
 * the x components of the bounding rectangle by
 * the x scaling factor and to multiply the y components
 * by the y scaling factor.
 */
Rectangle2D.Float scaledBounds = new Rectangle2D.Float((float) (rotBounds.getX() * scaleX), (float) (rotBounds.getY() * scaleY), (float) (rotBounds.getWidth() * scaleX), (float) (rotBounds.getHeight() * scaleY));
/* Pull the raster data from the buffered image
 * and pass it along to GDI.
 */
WritableRaster raster = deepImage.getRaster();
byte[] data;
if (raster instanceof ByteComponentRaster) {
data = ((ByteComponentRaster) raster).getDataStorage();
} else if (raster instanceof BytePackedRaster) {
data = ((BytePackedRaster) raster).getDataStorage();
} else {
// Raster type we can't hand to GDI: report failure to caller.
return false;
}
// Determine bits-per-pixel from the sample model; default to 24
// (the 3BYTE_BGR case).
int bitsPerPixel = 24;
SampleModel sm = deepImage.getSampleModel();
if (sm instanceof ComponentSampleModel) {
ComponentSampleModel csm = (ComponentSampleModel) sm;
bitsPerPixel = csm.getPixelStride() * 8;
} else if (sm instanceof MultiPixelPackedSampleModel) {
MultiPixelPackedSampleModel mppsm = (MultiPixelPackedSampleModel) sm;
bitsPerPixel = mppsm.getPixelBitStride();
} else {
if (icm != null) {
// Fall back to estimating bpp from the data size.
int diw = deepImage.getWidth();
int dih = deepImage.getHeight();
if (diw > 0 && dih > 0) {
bitsPerPixel = data.length * 8 / diw / dih;
}
}
}
/* Because the caller's image has been rotated
 * and sheared into our BufferedImage and because
 * we will be handing that BufferedImage directly to
 * GDI, we need to set an additional clip. This clip
 * makes sure that only parts of the BufferedImage
 * that are also part of the caller's image are drawn.
 */
Shape holdClip = getClip();
clip(xform.createTransformedShape(srcRect));
deviceClip(getClip().getPathIterator(getTransform()));
// Math.rint(x + 0.5) rounds the size up so fractional device
// pixels at the edges are still covered.
wPrinterJob.drawDIBImage(data, scaledBounds.x, scaledBounds.y, (float) Math.rint(scaledBounds.width + 0.5), (float) Math.rint(scaledBounds.height + 0.5), 0f, 0f, deepImage.getWidth(), deepImage.getHeight(), bitsPerPixel, icm);
// Restore the caller's clip before returning.
setClip(holdClip);
}
}
}
return true;
}
Aggregations