Use of sun.awt.image.ByteComponentRaster in project jdk8u_jdk by JetBrains.
The class ImageRepresentation, method setPixels.
public void setPixels(int x, int y, int w, int h, ColorModel model, byte[] pix, int off, int scansize) {
int lineOff = off;
int poff;
int[] newLUT = null;
if (src != null) {
src.checkSecurity(null, false);
}
// REMIND: What if the model doesn't fit in default color model?
synchronized (this) {
if (bimage == null) {
if (cmodel == null) {
cmodel = model;
}
createBufferedImage();
}
if (w <= 0 || h <= 0) {
return;
}
int biWidth = biRaster.getWidth();
int biHeight = biRaster.getHeight();
int x1 = x + w; // Overflow protection below
int y1 = y + h; // Overflow protection below
if (x < 0) {
off -= x;
x = 0;
} else if (x1 < 0) {
// Must be overflow
x1 = biWidth;
}
if (y < 0) {
off -= y * scansize;
y = 0;
} else if (y1 < 0) {
// Must be overflow
y1 = biHeight;
}
if (x1 > biWidth) {
x1 = biWidth;
}
if (y1 > biHeight) {
y1 = biHeight;
}
if (x >= x1 || y >= y1) {
return;
}
// x,y,x1,y1 are all >= 0, so w,h must be >= 0
w = x1 - x;
h = y1 - y;
// off is first pixel read so it must be in bounds
if (off < 0 || off >= pix.length) {
// They overflowed their own array
throw new ArrayIndexOutOfBoundsException("Data offset out of bounds.");
}
// pix.length and off are >= 0 so remainder >= 0
int remainder = pix.length - off;
if (remainder < w) {
// They overflowed their own array
throw new ArrayIndexOutOfBoundsException("Data array is too short.");
}
int num;
if (scansize < 0) {
num = (off / -scansize) + 1;
} else if (scansize > 0) {
num = ((remainder - w) / scansize) + 1;
} else {
num = h;
}
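// Worked example (added for clarity; not in the original source): with
// pix.length = 100, off = 10, w = 20 and scansize = 30, remainder = 90 and
// num = ((90 - 20) / 30) + 1 = 3, so at most 3 full scanlines can be read
// from the array and any h > 3 is rejected below.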
if (h > num) {
// They overflowed their own array.
throw new ArrayIndexOutOfBoundsException("Data array is too short.");
}
if (isSameCM && (cmodel != model) && (srcLUT != null) && (model instanceof IndexColorModel) && (biRaster instanceof ByteComponentRaster)) {
IndexColorModel icm = (IndexColorModel) model;
ByteComponentRaster bct = (ByteComponentRaster) biRaster;
int numlut = numSrcLUT;
if (!setDiffICM(x, y, w, h, srcLUT, srcLUTtransIndex, numSrcLUT, icm, pix, off, scansize, bct, bct.getDataOffset(0))) {
convertToRGB();
} else {
// Note that setDiffICM modified the raster directly
// so we must mark it as changed
bct.markDirty();
if (numlut != numSrcLUT) {
boolean hasAlpha = icm.hasAlpha();
if (srcLUTtransIndex != -1) {
hasAlpha = true;
}
int nbits = icm.getPixelSize();
icm = new IndexColorModel(nbits, numSrcLUT, srcLUT, 0, hasAlpha, srcLUTtransIndex, (nbits > 8 ? DataBuffer.TYPE_USHORT : DataBuffer.TYPE_BYTE));
cmodel = icm;
bimage = createImage(icm, bct, false, null);
}
return;
}
}
if (isDefaultBI) {
int pixel;
IntegerComponentRaster iraster = (IntegerComponentRaster) biRaster;
if (srcLUT != null && model instanceof IndexColorModel) {
if (model != srcModel) {
// Fill in the new lut
((IndexColorModel) model).getRGBs(srcLUT);
srcModel = model;
}
if (s_useNative) {
// Note that setICMpixels modifies the raster directly,
// so we must mark it as changed afterwards
if (setICMpixels(x, y, w, h, srcLUT, pix, off, scansize, iraster)) {
iraster.markDirty();
} else {
abort();
return;
}
} else {
int[] storage = new int[w * h];
int soff = 0;
// It is an IndexColorModel
for (int yoff = 0; yoff < h; yoff++, lineOff += scansize) {
poff = lineOff;
for (int i = 0; i < w; i++) {
storage[soff++] = srcLUT[pix[poff++] & 0xff];
}
}
iraster.setDataElements(x, y, w, h, storage);
}
} else {
int[] storage = new int[w];
for (int yoff = y; yoff < y + h; yoff++, lineOff += scansize) {
poff = lineOff;
for (int i = 0; i < w; i++) {
storage[i] = model.getRGB(pix[poff++] & 0xff);
}
iraster.setDataElements(x, yoff, w, 1, storage);
}
availinfo |= ImageObserver.SOMEBITS;
}
} else if ((cmodel == model) && (biRaster instanceof ByteComponentRaster) && (biRaster.getNumDataElements() == 1)) {
ByteComponentRaster bt = (ByteComponentRaster) biRaster;
if (off == 0 && scansize == w) {
bt.putByteData(x, y, w, h, pix);
} else {
byte[] bpix = new byte[w];
poff = off;
for (int yoff = y; yoff < y + h; yoff++) {
System.arraycopy(pix, poff, bpix, 0, w);
bt.putByteData(x, yoff, w, 1, bpix);
poff += scansize;
}
}
} else {
for (int yoff = y; yoff < y + h; yoff++, lineOff += scansize) {
poff = lineOff;
for (int xoff = x; xoff < x + w; xoff++) {
bimage.setRGB(xoff, yoff, model.getRGB(pix[poff++] & 0xff));
}
}
availinfo |= ImageObserver.SOMEBITS;
}
}
if ((availinfo & ImageObserver.FRAMEBITS) == 0) {
newInfo(image, ImageObserver.SOMEBITS, x, y, w, h);
}
}
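The non-native indexed branch above boils down to a per-scanline loop that expands palette indices through a lookup table and hands each row to the raster. Below is a minimal, self-contained sketch of that same pattern using only the public java.awt.image API (WritableRaster.setDataElements on a TYPE_INT_RGB image); the class, method and variable names are illustrative and not taken from the JDK source.

import java.awt.image.BufferedImage;
import java.awt.image.WritableRaster;

public class LutExpansionSketch {
    /**
     * Expands 8-bit palette indices into packed RGB values and writes them
     * into the destination image one scanline at a time, honouring the
     * producer's 'off' and 'scansize' layout (same contract as setPixels).
     */
    static void setIndexedPixels(BufferedImage dst, int x, int y, int w, int h,
                                 int[] lut, byte[] pix, int off, int scansize) {
        WritableRaster raster = dst.getRaster();
        int[] storage = new int[w];          // one scanline of packed RGB
        int lineOff = off;
        for (int row = 0; row < h; row++, lineOff += scansize) {
            int poff = lineOff;
            for (int i = 0; i < w; i++) {
                storage[i] = lut[pix[poff++] & 0xff];   // palette index -> RGB
            }
            // TYPE_INT_RGB stores one int per pixel, so the int[] maps directly.
            raster.setDataElements(x, y + row, w, 1, storage);
        }
    }

    public static void main(String[] args) {
        BufferedImage img = new BufferedImage(4, 2, BufferedImage.TYPE_INT_RGB);
        int[] lut = { 0x000000, 0xFF0000, 0x00FF00, 0x0000FF };  // tiny palette
        byte[] pix = { 0, 1, 2, 3, 3, 2, 1, 0 };                 // 4x2 indices
        setIndexedPixels(img, 0, 0, 4, 2, lut, pix, 0, 4);
        System.out.printf("pixel(1,0)=%06X%n", img.getRGB(1, 0) & 0xFFFFFF);  // prints FF0000
    }
}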
Use of sun.awt.image.ByteComponentRaster in project jdk8u_jdk by JetBrains.
The class WPathGraphics, method redrawRegion.
/**
* Have the printing application redraw everything that falls
* within the page bounds defined by <code>region</code>.
*/
@Override
public void redrawRegion(Rectangle2D region, double scaleX, double scaleY, Shape savedClip, AffineTransform savedTransform) throws PrinterException {
WPrinterJob wPrinterJob = (WPrinterJob) getPrinterJob();
Printable painter = getPrintable();
PageFormat pageFormat = getPageFormat();
int pageIndex = getPageIndex();
/* Create a buffered image big enough to hold the portion
* of the source image being printed.
*/
BufferedImage deepImage = new BufferedImage((int) region.getWidth(), (int) region.getHeight(), BufferedImage.TYPE_3BYTE_BGR);
/* Get a graphics for the application to render into.
* We initialize the buffer to white in order to
* match the paper and then we shift the BufferedImage
* so that it covers the area on the page where the
* caller's Image will be drawn.
*/
Graphics2D g = deepImage.createGraphics();
ProxyGraphics2D proxy = new ProxyGraphics2D(g, wPrinterJob);
proxy.setColor(Color.white);
proxy.fillRect(0, 0, deepImage.getWidth(), deepImage.getHeight());
proxy.clipRect(0, 0, deepImage.getWidth(), deepImage.getHeight());
proxy.translate(-region.getX(), -region.getY());
/* Calculate the resolution of the source image.
*/
float sourceResX = (float) (wPrinterJob.getXRes() / scaleX);
float sourceResY = (float) (wPrinterJob.getYRes() / scaleY);
/* The application expects to see user space at 72 dpi,
* so change user space from image source resolution to
* 72 dpi.
*/
proxy.scale(sourceResX / DEFAULT_USER_RES, sourceResY / DEFAULT_USER_RES);
proxy.translate(-wPrinterJob.getPhysicalPrintableX(pageFormat.getPaper()) / wPrinterJob.getXRes() * DEFAULT_USER_RES, -wPrinterJob.getPhysicalPrintableY(pageFormat.getPaper()) / wPrinterJob.getYRes() * DEFAULT_USER_RES);
/* NB User space now has to be at 72 dpi for this calc to be correct */
proxy.transform(new AffineTransform(getPageFormat().getMatrix()));
proxy.setPaint(Color.black);
painter.print(proxy, pageFormat, pageIndex);
g.dispose();
/* We need to set the device clip using saved information.
* savedClip intersects the user clip with a clip that restricts
* the GDI rendered area of our BufferedImage to that which
* may correspond to a rotate or shear.
* The saved device transform is needed as the current transform
* is not likely to be the same.
*/
if (savedClip != null) {
deviceClip(savedClip.getPathIterator(savedTransform));
}
/* Scale the bounding rectangle by the scale transform.
* Because the scaling transform has only x and y
* scaling components it is equivalent to multiplying
* the x components of the bounding rectangle by
* the x scaling factor and to multiplying the y components
* by the y scaling factor.
*/
Rectangle2D.Float scaledBounds = new Rectangle2D.Float((float) (region.getX() * scaleX), (float) (region.getY() * scaleY), (float) (region.getWidth() * scaleX), (float) (region.getHeight() * scaleY));
/* Pull the raster data from the buffered image
* and pass it along to GDI.
*/
ByteComponentRaster tile = (ByteComponentRaster) deepImage.getRaster();
wPrinterJob.drawImage3ByteBGR(tile.getDataStorage(), scaledBounds.x, scaledBounds.y, scaledBounds.width, scaledBounds.height, 0f, 0f, deepImage.getWidth(), deepImage.getHeight());
}
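tile.getDataStorage() above reaches into the internal sun.awt.image.ByteComponentRaster for the byte[] backing the TYPE_3BYTE_BGR buffer. Outside the JDK the same bytes are reachable through the public DataBuffer API; here is a minimal sketch, assuming the image was created directly with the TYPE_3BYTE_BGR constructor so its raster is backed by a single DataBufferByte (names are illustrative).

import java.awt.image.BufferedImage;
import java.awt.image.DataBufferByte;

public class BgrBytesSketch {
    /** Returns the raw B,G,R bytes backing a TYPE_3BYTE_BGR image. */
    static byte[] bgrBytes(BufferedImage img) {
        if (img.getType() != BufferedImage.TYPE_3BYTE_BGR) {
            throw new IllegalArgumentException("expected TYPE_3BYTE_BGR");
        }
        // For an image created with this constructor the raster is backed by a
        // single DataBufferByte holding width * height * 3 bytes in B,G,R order.
        return ((DataBufferByte) img.getRaster().getDataBuffer()).getData();
    }

    public static void main(String[] args) {
        BufferedImage img = new BufferedImage(2, 1, BufferedImage.TYPE_3BYTE_BGR);
        img.setRGB(0, 0, 0x112233);           // R=0x11 G=0x22 B=0x33
        byte[] data = bgrBytes(img);
        // First pixel is stored blue-first: prints 33 22 11.
        System.out.printf("%02X %02X %02X%n", data[0], data[1], data[2]);
    }
}

Note that getData() returns the backing array rather than a copy (and may disable image acceleration), which is why code like the above can hand the bytes straight to the printing backend without another allocation.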
Use of sun.awt.image.ByteComponentRaster in project jdk8u_jdk by JetBrains.
The class WPathGraphics, method drawImageToPlatform.
/**
* The various <code>drawImage()</code> methods for
* <code>WPathGraphics</code> are all decomposed
* into an invocation of <code>drawImageToPlatform</code>.
* The portion of the passed in image defined by
* <code>srcX, srcY, srcWidth, and srcHeight</code>
* is transformed by the supplied AffineTransform and
* drawn using GDI to the printer context.
*
* @param image The image to be drawn.
* @param xform Used to transform the image before drawing.
* This can be null.
* @param bgcolor This color is drawn where the image has transparent
* pixels. If this parameter is null then the
* pixels already in the destination should show
* through.
* @param srcX With srcY this defines the upper-left corner
* of the portion of the image to be drawn.
*
* @param srcY With srcX this defines the upper-left corner
* of the portion of the image to be drawn.
* @param srcWidth The width of the portion of the image to
* be drawn.
* @param srcHeight The height of the portion of the image to
* be drawn.
* @param handlingTransparency if being recursively called to
* print opaque region of transparent image
*/
@Override
protected boolean drawImageToPlatform(Image image, AffineTransform xform, Color bgcolor, int srcX, int srcY, int srcWidth, int srcHeight, boolean handlingTransparency) {
BufferedImage img = getBufferedImage(image);
if (img == null) {
return true;
}
WPrinterJob wPrinterJob = (WPrinterJob) getPrinterJob();
/* The full transform to be applied to the image is the
* caller's transform concatenated on to the transform
* from user space to device space. If the caller didn't
* supply a transform then we just act as if they passed
* in the identity transform.
*/
AffineTransform fullTransform = getTransform();
if (xform == null) {
xform = new AffineTransform();
}
fullTransform.concatenate(xform);
/* Split the full transform into a pair of
* transforms. The first transform holds effects
* that GDI (under Win95) can not perform such
* as rotation and shearing. The second transform
* is setup to hold only the scaling effects.
* These transforms are created such that a point,
* p, in user space, when transformed by 'fullTransform'
* lands in the same place as when it is transformed
* by 'rotTransform' and then 'scaleTransform'.
*
* The entire image transformation is not in Java in order
* to minimize the amount of memory needed in the VM. By
* dividing the transform in two, we rotate and shear
* the source image in its own space and only go to
* the, usually, larger, device space when we ask
* GDI to perform the final scaling.
* Clamp this to the device scale for better quality printing.
*/
double[] fullMatrix = new double[6];
fullTransform.getMatrix(fullMatrix);
/* Calculate the amount of scaling in the x
* and y directions. This scaling is computed by
* transforming a unit vector along each axis
* and computing the resulting magnitude.
* The computed values 'scaleX' and 'scaleY'
* represent the amount of scaling GDI will be asked
* to perform.
*/
Point2D.Float unitVectorX = new Point2D.Float(1, 0);
Point2D.Float unitVectorY = new Point2D.Float(0, 1);
fullTransform.deltaTransform(unitVectorX, unitVectorX);
fullTransform.deltaTransform(unitVectorY, unitVectorY);
Point2D.Float origin = new Point2D.Float(0, 0);
double scaleX = unitVectorX.distance(origin);
double scaleY = unitVectorY.distance(origin);
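// Illustration (added; not in the original source): for a transform that
// rotates by 30 degrees and then scales uniformly by 2, both unit vectors map
// to vectors of length 2, so scaleX = scaleY = 2 and GDI is asked to perform
// a pure 2x scale while Java2D handles the rotation.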
double devResX = wPrinterJob.getXRes();
double devResY = wPrinterJob.getYRes();
double devScaleX = devResX / DEFAULT_USER_RES;
double devScaleY = devResY / DEFAULT_USER_RES;
/* check if rotated or sheared */
int transformType = fullTransform.getType();
boolean clampScale = ((transformType & (AffineTransform.TYPE_GENERAL_ROTATION | AffineTransform.TYPE_GENERAL_TRANSFORM)) != 0);
if (clampScale) {
if (scaleX > devScaleX)
scaleX = devScaleX;
if (scaleY > devScaleY)
scaleY = devScaleY;
}
/* We do not need to draw anything if either scaling
* factor is zero.
*/
if (scaleX != 0 && scaleY != 0) {
/* Here's the transformation we will do with Java2D,
*/
AffineTransform rotTransform = new AffineTransform(
        fullMatrix[0] / scaleX,   //m00
        fullMatrix[1] / scaleY,   //m10
        fullMatrix[2] / scaleX,   //m01
        fullMatrix[3] / scaleY,   //m11
        fullMatrix[4] / scaleX,   //m02
        fullMatrix[5] / scaleY);  //m12
/* The scale transform is not used directly: we instead
* directly multiply by scaleX and scaleY.
*
* Conceptually here is what the scaleTransform is:
*
* AffineTransform scaleTransform = new AffineTransform(
* scaleX, //m00
* 0, //m10
* 0, //m01
* scaleY, //m11
* 0, //m02
* 0); //m12
*/
/* Convert the image source's rectangle into the rotated
* and sheared space. Once there, we calculate a rectangle
* that encloses the resulting shape. It is this rectangle
* which defines the size of the BufferedImage we need to
* create to hold the transformed image.
*/
Rectangle2D.Float srcRect = new Rectangle2D.Float(srcX, srcY, srcWidth, srcHeight);
Shape rotShape = rotTransform.createTransformedShape(srcRect);
Rectangle2D rotBounds = rotShape.getBounds2D();
/* add a fudge factor as some fp precision problems have
* been observed which caused pixels to be rounded down and
* out of the image.
*/
rotBounds.setRect(rotBounds.getX(), rotBounds.getY(), rotBounds.getWidth() + 0.001, rotBounds.getHeight() + 0.001);
int boundsWidth = (int) rotBounds.getWidth();
int boundsHeight = (int) rotBounds.getHeight();
if (boundsWidth > 0 && boundsHeight > 0) {
/* If the image has transparent or semi-transparent
* pixels then we'll have the application re-render
* the portion of the page covered by the image.
* The BufferedImage will be at the image's resolution
* to avoid wasting memory. By re-rendering this portion
* of a page all compositing is done by Java2D into
* the BufferedImage and then that image is copied to
* GDI.
* However several special cases can be handled otherwise:
* - bitmask transparency with a solid background colour
* - images which have transparency color models but no
* transparent pixels
* - images with bitmask transparency and an IndexColorModel
* (the common transparent GIF case) can be handled by
* rendering just the opaque pixels.
*/
boolean drawOpaque = true;
if (!handlingTransparency && hasTransparentPixels(img)) {
drawOpaque = false;
if (isBitmaskTransparency(img)) {
if (bgcolor == null) {
if (drawBitmaskImage(img, xform, bgcolor, srcX, srcY, srcWidth, srcHeight)) {
// image drawn, just return.
return true;
}
} else if (bgcolor.getTransparency() == Transparency.OPAQUE) {
drawOpaque = true;
}
}
if (!canDoRedraws()) {
drawOpaque = true;
}
} else {
// if there are no transparent pixels there's no need
// for a background colour. This can avoid edge artifacts
// in rotation cases.
bgcolor = null;
}
// If the source region extends beyond the image, the "opaque" path
// may blit b/g colour (including white) where it shouldn't.
if ((srcX + srcWidth > img.getWidth(null) || srcY + srcHeight > img.getHeight(null)) && canDoRedraws()) {
drawOpaque = false;
}
if (drawOpaque == false) {
fullTransform.getMatrix(fullMatrix);
AffineTransform tx = new AffineTransform(
        fullMatrix[0] / devScaleX,   //m00
        fullMatrix[1] / devScaleY,   //m10
        fullMatrix[2] / devScaleX,   //m01
        fullMatrix[3] / devScaleY,   //m11
        fullMatrix[4] / devScaleX,   //m02
        fullMatrix[5] / devScaleY);  //m12
Rectangle2D.Float rect = new Rectangle2D.Float(srcX, srcY, srcWidth, srcHeight);
Shape shape = fullTransform.createTransformedShape(rect);
// Region isn't in user space because it's potentially
// been rotated for landscape.
Rectangle2D region = shape.getBounds2D();
region.setRect(region.getX(), region.getY(), region.getWidth() + 0.001, region.getHeight() + 0.001);
// Try to limit the amount of memory used to 8MB, so
// if at device resolution this exceeds a certain
// image size then scale down the region to fit in
// that memory, but never to less than 72 dpi.
int w = (int) region.getWidth();
int h = (int) region.getHeight();
int nbytes = w * h * 3;
int maxBytes = 8 * 1024 * 1024;
double origDpi = (devResX < devResY) ? devResX : devResY;
int dpi = (int) origDpi;
double scaleFactor = 1;
double maxSFX = w / (double) boundsWidth;
double maxSFY = h / (double) boundsHeight;
double maxSF = (maxSFX > maxSFY) ? maxSFY : maxSFX;
int minDpi = (int) (dpi / maxSF);
if (minDpi < DEFAULT_USER_RES)
minDpi = DEFAULT_USER_RES;
while (nbytes > maxBytes && dpi > minDpi) {
scaleFactor *= 2;
dpi /= 2;
nbytes /= 4;
}
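// Worked example (added; not in the original source): a 6000 x 8000 dot region
// at 600 dpi needs 6000 * 8000 * 3 = ~137MB, so the loop halves the resolution
// three times (600 -> 300 -> 150 -> 75 dpi, scaleFactor = 8) to drop below the
// 8MB cap, provided minDpi allows going that low.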
if (dpi < minDpi) {
scaleFactor = (origDpi / minDpi);
}
region.setRect(region.getX() / scaleFactor, region.getY() / scaleFactor, region.getWidth() / scaleFactor, region.getHeight() / scaleFactor);
/*
* We need to have the clip as part of the saved state,
* either directly, or all the components that are
* needed to reconstitute it (image source area,
* image transform and current graphics transform).
* The clip is described in user space, so we need to
* save the current graphics transform anyway so just
* save these two.
*/
wPrinterJob.saveState(getTransform(), getClip(), region, scaleFactor, scaleFactor);
return true;
/* The image can be rendered directly by GDI so we
* copy it into a BufferedImage (this takes care of
* ColorSpace and BufferedImageOp issues) and then
* send that to GDI.
*/
} else {
/* Create a buffered image big enough to hold the portion
* of the source image being printed.
* The image format will be 3BYTE_BGR for most cases
* except where we can represent the image as a 1, 4 or 8
* bits-per-pixel DIB.
*/
int dibType = BufferedImage.TYPE_3BYTE_BGR;
IndexColorModel icm = null;
ColorModel cm = img.getColorModel();
int imgType = img.getType();
if (cm instanceof IndexColorModel && cm.getPixelSize() <= 8 && (imgType == BufferedImage.TYPE_BYTE_BINARY || imgType == BufferedImage.TYPE_BYTE_INDEXED)) {
icm = (IndexColorModel) cm;
dibType = imgType;
/* BYTE_BINARY may be 2 bpp which DIB can't handle.
* Convert this to 4bpp.
*/
if (imgType == BufferedImage.TYPE_BYTE_BINARY && cm.getPixelSize() == 2) {
int[] rgbs = new int[16];
icm.getRGBs(rgbs);
boolean transparent = icm.getTransparency() != Transparency.OPAQUE;
int transpixel = icm.getTransparentPixel();
icm = new IndexColorModel(4, 16, rgbs, 0, transparent, transpixel, DataBuffer.TYPE_BYTE);
}
}
int iw = (int) rotBounds.getWidth();
int ih = (int) rotBounds.getHeight();
BufferedImage deepImage = null;
/* If there is no special transform needed (this is a
* simple BLIT) and dibType == img.getType() and we
* didn't create a new IndexColorModel AND the whole of
* the source image is being drawn (GDI can't handle a
* portion of the original source image) then we
* don't need to create this intermediate image - GDI
* can access the data from the original image.
* Since a subimage can be created by calling
* BufferedImage.getSubimage() that condition needs to
* be accounted for too. This implies inspecting the
* data buffer. In the end too many cases are not able
* to take advantage of this option until we can teach
* the native code to properly navigate the data buffer.
* There was a concern that, since in native code we
* need to DWORD align and flip to a bottom-up DIB,
* the "original" image may get perturbed by this.
* But in fact we always malloc new memory for the aligned
* copy so this isn't a problem.
* This points out that we allocate two temporary copies
* of the image: one in Java and one in native. If
* we can be smarter about not allocating this one when
* not needed, that would seem like a good thing to do,
* even if in many cases the ColorModels don't match and
* it's needed.
* Until all of this is resolved newImage is always true.
*/
boolean newImage = true;
if (newImage) {
if (icm == null) {
deepImage = new BufferedImage(iw, ih, dibType);
} else {
deepImage = new BufferedImage(iw, ih, dibType, icm);
}
/* Setup a Graphics2D on to the BufferedImage so that
* the source image when copied, lands within the
* image buffer.
*/
Graphics2D imageGraphics = deepImage.createGraphics();
imageGraphics.clipRect(0, 0, deepImage.getWidth(), deepImage.getHeight());
imageGraphics.translate(-rotBounds.getX(), -rotBounds.getY());
imageGraphics.transform(rotTransform);
/* Fill the BufferedImage either with the caller
* supplied color, 'bgColor' or, if null, with white.
*/
if (bgcolor == null) {
bgcolor = Color.white;
}
imageGraphics.drawImage(img, srcX, srcY, srcX + srcWidth, srcY + srcHeight, srcX, srcY, srcX + srcWidth, srcY + srcHeight, bgcolor, null);
imageGraphics.dispose();
} else {
deepImage = img;
}
/* Scale the bounding rectangle by the scale transform.
* Because the scaling transform has only x and y
* scaling components it is equivalent to multiplying
* the x components of the bounding rectangle by
* the x scaling factor and multiplying the y components
* by the y scaling factor.
*/
Rectangle2D.Float scaledBounds = new Rectangle2D.Float((float) (rotBounds.getX() * scaleX), (float) (rotBounds.getY() * scaleY), (float) (rotBounds.getWidth() * scaleX), (float) (rotBounds.getHeight() * scaleY));
/* Pull the raster data from the buffered image
* and pass it along to GDI.
*/
WritableRaster raster = deepImage.getRaster();
byte[] data;
if (raster instanceof ByteComponentRaster) {
data = ((ByteComponentRaster) raster).getDataStorage();
} else if (raster instanceof BytePackedRaster) {
data = ((BytePackedRaster) raster).getDataStorage();
} else {
return false;
}
int bitsPerPixel = 24;
SampleModel sm = deepImage.getSampleModel();
if (sm instanceof ComponentSampleModel) {
ComponentSampleModel csm = (ComponentSampleModel) sm;
bitsPerPixel = csm.getPixelStride() * 8;
} else if (sm instanceof MultiPixelPackedSampleModel) {
MultiPixelPackedSampleModel mppsm = (MultiPixelPackedSampleModel) sm;
bitsPerPixel = mppsm.getPixelBitStride();
} else {
if (icm != null) {
int diw = deepImage.getWidth();
int dih = deepImage.getHeight();
if (diw > 0 && dih > 0) {
bitsPerPixel = data.length * 8 / diw / dih;
}
}
}
/* Because the caller's image has been rotated
* and sheared into our BufferedImage and because
* we will be handing that BufferedImage directly to
* GDI, we need to set an additional clip. This clip
* makes sure that only parts of the BufferedImage
* that are also part of the caller's image are drawn.
*/
Shape holdClip = getClip();
clip(xform.createTransformedShape(srcRect));
deviceClip(getClip().getPathIterator(getTransform()));
wPrinterJob.drawDIBImage(data, scaledBounds.x, scaledBounds.y, (float) Math.rint(scaledBounds.width + 0.5), (float) Math.rint(scaledBounds.height + 0.5), 0f, 0f, deepImage.getWidth(), deepImage.getHeight(), bitsPerPixel, icm);
setClip(holdClip);
}
}
}
return true;
}
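The comments in drawImageToPlatform describe splitting the full transform into a rotation/shear part that Java2D applies and a pure scale left for GDI. The sketch below is a standalone illustration of that decomposition: it measures scaleX and scaleY by delta-transforming the unit vectors, divides them out of the matrix exactly as rotTransform is built above, and checks that the pure scale applied after the remainder reproduces the original mapping. Class and variable names are illustrative.

import java.awt.geom.AffineTransform;
import java.awt.geom.Point2D;

public class SplitTransformSketch {
    public static void main(String[] args) {
        // A full transform mixing rotation, scale and shear.
        AffineTransform full = new AffineTransform();
        full.rotate(Math.toRadians(30));
        full.scale(3.0, 2.0);
        full.shear(0.1, 0.0);

        // Magnitudes of the transformed unit vectors: the scale left for GDI.
        Point2D.Float unitX = new Point2D.Float(1, 0);
        Point2D.Float unitY = new Point2D.Float(0, 1);
        full.deltaTransform(unitX, unitX);
        full.deltaTransform(unitY, unitY);
        Point2D.Float origin = new Point2D.Float(0, 0);
        double scaleX = unitX.distance(origin);
        double scaleY = unitY.distance(origin);

        // Divide the scale back out, leaving rotation/shear for Java2D,
        // mirroring how rotTransform is constructed above.
        double[] m = new double[6];
        full.getMatrix(m);
        AffineTransform rot = new AffineTransform(
                m[0] / scaleX, m[1] / scaleY,   // m00, m10
                m[2] / scaleX, m[3] / scaleY,   // m01, m11
                m[4] / scaleX, m[5] / scaleY);  // m02, m12

        // Applying rot first and the pure scale second must reproduce 'full'.
        AffineTransform recombined = AffineTransform.getScaleInstance(scaleX, scaleY);
        recombined.concatenate(rot);

        Point2D p = new Point2D.Double(5, 7);
        System.out.println("full:       " + full.transform(p, null));
        System.out.println("recombined: " + recombined.transform(p, null));
    }
}

For any non-zero scaleX and scaleY the recomposition is exact, because each matrix entry is divided and then multiplied by the same factor; the choice of unit-vector magnitudes simply controls how much work is pushed to the GDI scaling stage.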
Use of sun.awt.image.ByteComponentRaster in project jdk8u_jdk by JetBrains.
The class PSPathGraphics, method redrawRegion.
/** Redraw a rectangular area using a proxy graphics.
* To do this we need to know the rectangular area to redraw and
* the transform & clip in effect at the time of the original drawImage.
*
*/
public void redrawRegion(Rectangle2D region, double scaleX, double scaleY, Shape savedClip, AffineTransform savedTransform) throws PrinterException {
PSPrinterJob psPrinterJob = (PSPrinterJob) getPrinterJob();
Printable painter = getPrintable();
PageFormat pageFormat = getPageFormat();
int pageIndex = getPageIndex();
/* Create a buffered image big enough to hold the portion
* of the source image being printed.
*/
BufferedImage deepImage = new BufferedImage((int) region.getWidth(), (int) region.getHeight(), BufferedImage.TYPE_3BYTE_BGR);
/* Get a graphics for the application to render into.
* We initialize the buffer to white in order to
* match the paper and then we shift the BufferedImage
* so that it covers the area on the page where the
* caller's Image will be drawn.
*/
Graphics2D g = deepImage.createGraphics();
ProxyGraphics2D proxy = new ProxyGraphics2D(g, psPrinterJob);
proxy.setColor(Color.white);
proxy.fillRect(0, 0, deepImage.getWidth(), deepImage.getHeight());
proxy.clipRect(0, 0, deepImage.getWidth(), deepImage.getHeight());
proxy.translate(-region.getX(), -region.getY());
/* Calculate the resolution of the source image.
*/
float sourceResX = (float) (psPrinterJob.getXRes() / scaleX);
float sourceResY = (float) (psPrinterJob.getYRes() / scaleY);
/* The application expects to see user space at 72 dpi,
* so change user space from image source resolution to
* 72 dpi.
*/
proxy.scale(sourceResX / DEFAULT_USER_RES, sourceResY / DEFAULT_USER_RES);
proxy.translate(-psPrinterJob.getPhysicalPrintableX(pageFormat.getPaper()) / psPrinterJob.getXRes() * DEFAULT_USER_RES, -psPrinterJob.getPhysicalPrintableY(pageFormat.getPaper()) / psPrinterJob.getYRes() * DEFAULT_USER_RES);
/* NB User space now has to be at 72 dpi for this calc to be correct */
proxy.transform(new AffineTransform(getPageFormat().getMatrix()));
proxy.setPaint(Color.black);
painter.print(proxy, pageFormat, pageIndex);
g.dispose();
/* In PSPrinterJob images are printed in device space
* and therefore we need to set a device space clip.
*/
psPrinterJob.setClip(savedTransform.createTransformedShape(savedClip));
/* Scale the bounding rectangle by the scale transform.
* Because the scaling transform has only x and y
* scaling components it is equivalent to multiplying
* the x components of the bounding rectangle by
* the x scaling factor and multiplying the y components
* by the y scaling factor.
*/
Rectangle2D.Float scaledBounds = new Rectangle2D.Float((float) (region.getX() * scaleX), (float) (region.getY() * scaleY), (float) (region.getWidth() * scaleX), (float) (region.getHeight() * scaleY));
/* Pull the raster data from the buffered image
* and pass it along to PS.
*/
ByteComponentRaster tile = (ByteComponentRaster) deepImage.getRaster();
psPrinterJob.drawImageBGR(tile.getDataStorage(), scaledBounds.x, scaledBounds.y, scaledBounds.width, scaledBounds.height, 0f, 0f, deepImage.getWidth(), deepImage.getHeight(), deepImage.getWidth(), deepImage.getHeight());
}
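Both redrawRegion implementations prepare the proxy graphics the same way: scale user space from 72 dpi up to the source resolution, then translate away the device's unprintable margin (reported in device dots) after converting it to 72-dpi units. The sketch below isolates just that coordinate setup; the helper name, the 600 dpi resolution and the 150-dot margin are made-up values for illustration and are not the WPrinterJob/PSPrinterJob API.

import java.awt.geom.AffineTransform;
import java.awt.geom.Point2D;

public class UserSpaceSetupSketch {
    static final double DEFAULT_USER_RES = 72.0;

    /**
     * Builds the transform the proxy applies: the caller keeps drawing in
     * 72-dpi user space, but coordinates are scaled to device resolution and
     * shifted by the hardware margin (given here in device dots).
     */
    static AffineTransform userToDevice(double xRes, double yRes,
                                        double printableXDots, double printableYDots) {
        AffineTransform t = new AffineTransform();
        t.scale(xRes / DEFAULT_USER_RES, yRes / DEFAULT_USER_RES);
        // Convert the margin from device dots to 72-dpi units before
        // translating, mirroring the proxy.translate(...) calls above.
        t.translate(-printableXDots / xRes * DEFAULT_USER_RES,
                    -printableYDots / yRes * DEFAULT_USER_RES);
        return t;
    }

    public static void main(String[] args) {
        // Hypothetical 600 x 600 dpi device with a 150-dot (quarter inch) margin.
        AffineTransform t = userToDevice(600, 600, 150, 150);
        Point2D a = t.transform(new Point2D.Double(0, 0), null);
        Point2D b = t.transform(new Point2D.Double(72, 0), null);
        // One inch (72 user units) spans 600 device dots: b.x - a.x == 600.
        System.out.println(a + " -> " + b);
    }
}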