Example use of org.apache.hadoop.io.compress.CompressionInputStream in the Apache Hadoop project.
Source: class TestZStandardCompressorDecompressor, method testCompressingWithOneByteOutputBuffer.
@Test
public void testCompressingWithOneByteOutputBuffer() throws Exception {
    // Read the fixture file fully; its length anchors the later assertions.
    int uncompressedSize = (int) FileUtils.sizeOf(uncompressedFile);
    byte[] bytes = FileUtils.readFileToByteArray(uncompressedFile);
    assertEquals(uncompressedSize, bytes.length);

    Configuration conf = new Configuration();
    ZStandardCodec codec = new ZStandardCodec();
    codec.setConf(conf);

    ByteArrayOutputStream baos = new ByteArrayOutputStream();
    // Level 3, default input buffer, 1-byte output buffer: forces the
    // compressor to drain its output one byte at a time.
    Compressor compressor = new ZStandardCompressor(3, IO_FILE_BUFFER_SIZE_DEFAULT, 1);
    CompressionOutputStream outputStream = codec.createOutputStream(baos, compressor);
    try {
        // Single-byte writes exercise the smallest possible write path.
        for (byte aByte : bytes) {
            outputStream.write(aByte);
        }
        outputStream.finish();
    } finally {
        // FIX: the original never closed this stream, leaking it if an
        // earlier write or finish() threw.
        outputStream.close();
    }
    assertEquals(uncompressedSize, compressor.getBytesRead());
    assertTrue(compressor.finished());

    // just make sure we can decompress the file
    ByteArrayOutputStream byteArrayOutputStream = new ByteArrayOutputStream();
    ByteArrayInputStream bais = new ByteArrayInputStream(baos.toByteArray());
    Decompressor decompressor = codec.createDecompressor();
    // FIX: close the decompression stream (the original leaked it).
    try (CompressionInputStream inputStream = codec.createInputStream(bais, decompressor)) {
        byte[] buffer = new byte[100];
        int n;
        // FIX: the original reused n as both the requested length and the
        // result, so one short read permanently shrank every later request.
        // Always ask for the full buffer.
        while ((n = inputStream.read(buffer, 0, buffer.length)) != -1) {
            byteArrayOutputStream.write(buffer, 0, n);
        }
    }
    assertArrayEquals(bytes, byteArrayOutputStream.toByteArray());
}
Example use of org.apache.hadoop.io.compress.CompressionInputStream in the Apache Ignite project.
Source: class HadoopSnappyTest, method checkSnappy.
/**
 * Internal check routine: round-trips a random byte array through
 * {@code SnappyCodec} and verifies the decompressed output matches the input.
 *
 * @throws Throwable If failed.
 */
public static void checkSnappy() throws Throwable {
    try {
        byte[] expBytes = new byte[BYTE_SIZE];
        byte[] actualBytes = new byte[BYTE_SIZE];

        // Low-entropy payload (values 0..15) so Snappy can actually compress it.
        for (int i = 0; i < expBytes.length; i++)
            expBytes[i] = (byte) ThreadLocalRandom.current().nextInt(16);

        SnappyCodec codec = new SnappyCodec();
        codec.setConf(new Configuration());

        ByteArrayOutputStream baos = new ByteArrayOutputStream();
        try (CompressionOutputStream cos = codec.createOutputStream(baos)) {
            cos.write(expBytes);
            cos.flush();
        }

        try (CompressionInputStream cis = codec.createInputStream(new ByteArrayInputStream(baos.toByteArray()))) {
            // FIX: InputStream.read() may legally return fewer bytes than
            // requested; the original asserted on a single read. Loop until
            // the buffer is full or EOF.
            int read = 0;
            while (read < actualBytes.length) {
                int n = cis.read(actualBytes, read, actualBytes.length - read);
                if (n < 0)
                    break;
                read += n;
            }
            assert read == actualBytes.length;
        }

        assert Arrays.equals(expBytes, actualBytes);
    } catch (Throwable e) {
        System.out.println("Snappy check failed:");
        System.out.println("### NativeCodeLoader.isNativeCodeLoaded: " + NativeCodeLoader.isNativeCodeLoaded());
        System.out.println("### SnappyCompressor.isNativeCodeLoaded: " + SnappyCompressor.isNativeCodeLoaded());
        throw e;
    }
}
Example use of org.apache.hadoop.io.compress.CompressionInputStream in the Apache Apex Malhar project.
Source: class AbstractFileOutputOperatorTest, method testSnappyCompressionSimple.
@Test
public void testSnappyCompressionSimple() throws IOException {
    // Skip when the native Snappy library is unavailable.
    if (checkNativeSnappy()) {
        return;
    }
    File snappyFile = new File(testMeta.getDir(), "snappyTestFile.snappy");
    Configuration conf = new Configuration();
    CompressionCodec codec = (CompressionCodec) ReflectionUtils.newInstance(SnappyCodec.class, conf);

    int ONE_MB = 1024 * 1024;
    String testStr = "TestSnap-16bytes";
    // FIX: try-with-resources so the file handle is released even when a
    // write throws (the original leaked the stream on failure).
    try (BufferedOutputStream os = new BufferedOutputStream(new FileOutputStream(snappyFile))) {
        FilterStreamCodec.SnappyFilterStream filterStream =
            new FilterStreamCodec.SnappyFilterStream(codec.createOutputStream(os));
        // Write the 16-byte marker 1M times => 16 MB of compressible data.
        for (int i = 0; i < ONE_MB; i++) {
            filterStream.write(testStr.getBytes());
        }
        filterStream.flush();
        filterStream.close();
    }

    byte[] recovered = new byte[testStr.length()];
    // FIX: close the input stream even when an assertion/read throws.
    try (CompressionInputStream is = codec.createInputStream(new FileInputStream(snappyFile))) {
        // FIX: a single read() may return fewer bytes than requested; loop
        // until the buffer is full or EOF.
        int off = 0;
        while (off < recovered.length) {
            int n = is.read(recovered, off, recovered.length - off);
            if (n < 0) {
                break;
            }
            off += n;
        }
    }
    assertEquals(testStr, new String(recovered));
}
Example use of org.apache.hadoop.io.compress.CompressionInputStream in the Apache Tez project.
Source: class TestShuffleUtils, method testInternalErrorTranslation.
@Test
public void testInternalErrorTranslation() throws Exception {
    final String codecErrorMsg = "codec failure";

    // Stub a codec whose input stream always throws InternalError on read.
    Decompressor stubDecompressor = mock(Decompressor.class);
    CompressionInputStream failingStream = mock(CompressionInputStream.class);
    when(failingStream.read(any(byte[].class), anyInt(), anyInt()))
        .thenThrow(new InternalError(codecErrorMsg));

    CompressionCodec stubCodec = mock(CompressionCodec.class);
    when(stubCodec.createDecompressor()).thenReturn(stubDecompressor);
    when(stubCodec.createInputStream(any(InputStream.class), any(Decompressor.class)))
        .thenReturn(failingStream);

    // Header bytes the shuffle input begins with: 'T', 'I', 'F', 1.
    byte[] header = new byte[] { (byte) 'T', (byte) 'I', (byte) 'F', (byte) 1 };

    try {
        ShuffleUtils.shuffleToMemory(new byte[1024], new ByteArrayInputStream(header),
            1024, 128, stubCodec, false, 0, mock(Logger.class), null);
        Assert.fail("shuffle was supposed to throw!");
    } catch (IOException e) {
        // The InternalError must be surfaced as an IOException that keeps
        // both the original cause and its message.
        Assert.assertTrue(e.getCause() instanceof InternalError);
        Assert.assertTrue(e.getMessage().contains(codecErrorMsg));
    }
}
Example use of org.apache.hadoop.io.compress.CompressionInputStream in the Brisk project (by Riptano).
Source: class CompressionTests, method testSnappyCompression.
@Test
public void testSnappyCompression() throws IOException {
    SnappyCodec c = new SnappyCodec(new Configuration());
    byte[] inmsg = new byte[1024 * 1024 * 10];
    fillArray(inmsg);
    byte[] buffer = new byte[1024 * 1024];
    byte[] outmsg = new byte[1024 * 1024 * 16];
    // Repeat the round trip to shake out state leaking between iterations.
    for (int k = 0; k < 64; k++) {
        ByteArrayOutputStream bout = new ByteArrayOutputStream();
        CompressionOutputStream cout = c.createOutputStream(bout);
        cout.write(inmsg);
        cout.flush();
        // FIX: the original never closed cout. flush() does not terminate a
        // compressed stream, so the final compressed block may never reach
        // bout; close() finishes the stream and releases the compressor.
        cout.close();

        ByteArrayInputStream bin = new ByteArrayInputStream(bout.toByteArray());
        CompressionInputStream cin = c.createInputStream(bin);
        int totaln = 0;
        try {
            // FIX: the original looped on available() > 0, but available()
            // is only an estimate and 0 does not mean EOF (InputStream
            // contract). Read until read() returns -1 instead.
            int n;
            while ((n = cin.read(buffer)) >= 0) {
                try {
                    System.arraycopy(buffer, 0, outmsg, totaln, n);
                } catch (Throwable t) {
                    System.err.println("n = " + n + " totaln " + totaln);
                    throw new RuntimeException(t);
                }
                totaln += n;
            }
        } finally {
            // FIX: the original leaked cin on every iteration.
            cin.close();
        }
        assertEquals(inmsg.length, totaln);
        for (int i = 0; i < inmsg.length; i++) {
            assertEquals(inmsg[i], outmsg[i]);
        }
        assertEquals(new String(inmsg), new String(outmsg, 0, totaln));
    }
}
Aggregations