Use of java.io.BufferedInputStream in project elasticsearch by elastic.
Class DeflateCompressor, method streamInput.
@Override
public StreamInput streamInput(StreamInput in) throws IOException {
    final byte[] headerBytes = new byte[HEADER.length];
    int len = 0;
    while (len < headerBytes.length) {
        final int read = in.read(headerBytes, len, headerBytes.length - len);
        if (read == -1) {
            break;
        }
        len += read;
    }
    if (len != HEADER.length || Arrays.equals(headerBytes, HEADER) == false) {
        throw new IllegalArgumentException("Input stream is not compressed with DEFLATE!");
    }
    final boolean nowrap = true;
    final Inflater inflater = new Inflater(nowrap);
    InputStream decompressedIn = new InflaterInputStream(in, inflater, BUFFER_SIZE);
    decompressedIn = new BufferedInputStream(decompressedIn, BUFFER_SIZE);
    return new InputStreamStreamInput(decompressedIn) {
        final AtomicBoolean closed = new AtomicBoolean(false);

        @Override
        public void close() throws IOException {
            try {
                super.close();
            } finally {
                if (closed.compareAndSet(false, true)) {
                    // important to release native memory
                    inflater.end();
                }
            }
        }
    };
}
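The nowrap flag matters here: the stream was written as raw DEFLATE (no zlib header or trailer) behind a custom magic-byte header, so the Inflater must be configured to match, and end() must run exactly once to release native zlib memory. Below is a self-contained JDK sketch of the same round trip; the HEADER bytes and BUFFER_SIZE are illustrative stand-ins rather than elasticsearch's actual constants.

import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.IOException;
import java.io.InputStream;
import java.util.zip.Deflater;
import java.util.zip.DeflaterOutputStream;
import java.util.zip.Inflater;
import java.util.zip.InflaterInputStream;

public class RawDeflateRoundTrip {
    // Illustrative values; the real HEADER/BUFFER_SIZE live in DeflateCompressor.
    static final byte[] HEADER = new byte[] { 'D', 'F', 'L', 0 };
    static final int BUFFER_SIZE = 4096;

    static byte[] compress(byte[] data) throws IOException {
        ByteArrayOutputStream bos = new ByteArrayOutputStream();
        bos.write(HEADER);
        Deflater deflater = new Deflater(Deflater.DEFAULT_COMPRESSION, true); // nowrap = true
        try (DeflaterOutputStream out = new DeflaterOutputStream(bos, deflater, BUFFER_SIZE)) {
            out.write(data);
        } finally {
            deflater.end();
        }
        return bos.toByteArray();
    }

    static byte[] decompress(byte[] compressed) throws IOException {
        InputStream in = new ByteArrayInputStream(compressed);
        in.readNBytes(HEADER.length); // consume the magic header (assume already validated)
        Inflater inflater = new Inflater(true); // must match the nowrap Deflater
        try (InputStream decompressed = new InflaterInputStream(in, inflater, BUFFER_SIZE)) {
            return decompressed.readAllBytes();
        } finally {
            inflater.end(); // release native memory, as the anonymous close() above does
        }
    }
}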
Use of java.io.BufferedInputStream in project elasticsearch by elastic.
Class JsonXContentGenerator, method writeRawField.
@Override
public void writeRawField(String name, InputStream content) throws IOException {
    if (content.markSupported() == false) {
        // needed for the XContentFactory.xContentType call
        content = new BufferedInputStream(content);
    }
    XContentType contentType = XContentFactory.xContentType(content);
    if (contentType == null) {
        throw new IllegalArgumentException("Can't write raw bytes whose xcontent-type can't be guessed");
    }
    writeRawField(name, content, contentType);
}
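The BufferedInputStream wrapper exists purely so the content-type sniffing can peek at the leading bytes and then rewind; streams like FileInputStream do not support mark/reset on their own. A minimal sketch of that contract, with a hypothetical sniffFormat helper standing in for XContentFactory.xContentType:

import java.io.BufferedInputStream;
import java.io.IOException;
import java.io.InputStream;

public class SniffDemo {
    // Hypothetical stand-in for XContentFactory.xContentType: inspects the
    // first byte, then rewinds so the caller sees an untouched stream.
    static String sniffFormat(InputStream in) throws IOException {
        in.mark(16);            // remember the current position
        int first = in.read();  // consume a byte to inspect it
        in.reset();             // rewind so the caller can re-read it
        return first == '{' ? "json" : "unknown";
    }

    static String sniff(InputStream content) throws IOException {
        if (!content.markSupported()) {
            // BufferedInputStream adds the mark/reset capability the sniffer needs
            content = new BufferedInputStream(content);
        }
        return sniffFormat(content);
    }
}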
Use of java.io.BufferedInputStream in project buck by facebook.
Class RepackZipEntriesStep, method execute.
@Override
public StepExecutionResult execute(ExecutionContext context) {
    Path inputFile = filesystem.getPathForRelativePath(inputPath);
    Path outputFile = filesystem.getPathForRelativePath(outputPath);
    try (ZipInputStream in = new ZipInputStream(new BufferedInputStream(Files.newInputStream(inputFile)));
         CustomZipOutputStream out = ZipOutputStreams.newOutputStream(outputFile)) {
        for (ZipEntry entry = in.getNextEntry(); entry != null; entry = in.getNextEntry()) {
            CustomZipEntry customEntry = new CustomZipEntry(entry);
            if (entries.contains(customEntry.getName())) {
                customEntry.setCompressionLevel(compressionLevel.getValue());
            }
            InputStream toUse;
            // If we're using STORED files, we must pre-calculate the CRC.
            if (customEntry.getMethod() == ZipEntry.STORED) {
                try (ByteArrayOutputStream bos = new ByteArrayOutputStream()) {
                    ByteStreams.copy(in, bos);
                    byte[] bytes = bos.toByteArray();
                    customEntry.setCrc(Hashing.crc32().hashBytes(bytes).padToLong());
                    customEntry.setSize(bytes.length);
                    customEntry.setCompressedSize(bytes.length);
                    toUse = new ByteArrayInputStream(bytes);
                }
            } else {
                toUse = in;
            }
            out.putNextEntry(customEntry);
            ByteStreams.copy(toUse, out);
            out.closeEntry();
        }
        return StepExecutionResult.SUCCESS;
    } catch (IOException e) {
        context.logError(e, "Unable to repack zip");
        return StepExecutionResult.ERROR;
    }
}
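The pre-calculation is not optional: the zip format requires a STORED entry's CRC and sizes in the local header before the data, so there is no compression pass that could compute them on the fly. A self-contained sketch of the same requirement using the plain JDK classes:

import java.io.FileOutputStream;
import java.io.IOException;
import java.nio.charset.StandardCharsets;
import java.util.zip.CRC32;
import java.util.zip.ZipEntry;
import java.util.zip.ZipOutputStream;

public class StoredEntryDemo {
    public static void main(String[] args) throws IOException {
        byte[] bytes = "uncompressed payload".getBytes(StandardCharsets.UTF_8);
        try (ZipOutputStream out = new ZipOutputStream(new FileOutputStream("demo.zip"))) {
            ZipEntry entry = new ZipEntry("payload.txt");
            entry.setMethod(ZipEntry.STORED);
            // STORED entries must carry CRC, size, and compressed size
            // *before* putNextEntry, or ZipOutputStream rejects the entry.
            CRC32 crc = new CRC32();
            crc.update(bytes);
            entry.setCrc(crc.getValue());
            entry.setSize(bytes.length);
            entry.setCompressedSize(bytes.length);
            out.putNextEntry(entry);
            out.write(bytes);
            out.closeEntry();
        }
    }
}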
Use of java.io.BufferedInputStream in project druid by druid-io.
Class CompressionUtils, method unzip.
/**
 * Unzip the pulled file to an output directory. This is only expected to work on zips with lone files,
 * and is not intended for zips with directory structures.
 *
 * @param pulledFile The file to unzip
 * @param outDir     The directory to store the contents of the file.
 *
 * @return a FileCopyResult of the files which were written to disk
 *
 * @throws IOException
 */
public static FileUtils.FileCopyResult unzip(final File pulledFile, final File outDir) throws IOException {
    if (!(outDir.exists() && outDir.isDirectory())) {
        throw new ISE("outDir[%s] must exist and be a directory", outDir);
    }
    log.info("Unzipping file[%s] to [%s]", pulledFile, outDir);
    final FileUtils.FileCopyResult result = new FileUtils.FileCopyResult();
    try (final ZipFile zipFile = new ZipFile(pulledFile)) {
        final Enumeration<? extends ZipEntry> enumeration = zipFile.entries();
        while (enumeration.hasMoreElements()) {
            final ZipEntry entry = enumeration.nextElement();
            result.addFiles(FileUtils.retryCopy(new ByteSource() {
                @Override
                public InputStream openStream() throws IOException {
                    return new BufferedInputStream(zipFile.getInputStream(entry));
                }
            }, new File(outDir, entry.getName()), FileUtils.IS_EXCEPTION, DEFAULT_RETRY_COUNT).getFiles());
        }
    }
    return result;
}
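The entry is wrapped in a ByteSource rather than handed over as a bare InputStream because a retrying copy must reopen a fresh stream on each attempt; a half-consumed stream cannot be rewound. A rough JDK-only sketch of that shape (retryCopy and StreamSupplier here are hypothetical simplifications, not druid's actual FileUtils API):

import java.io.IOException;
import java.io.InputStream;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.StandardCopyOption;

public class RetryCopy {
    interface StreamSupplier {
        InputStream open() throws IOException; // must yield a *fresh* stream each call
    }

    // Assumes maxAttempts >= 1.
    static void retryCopy(StreamSupplier source, Path dest, int maxAttempts) throws IOException {
        IOException last = null;
        for (int attempt = 1; attempt <= maxAttempts; attempt++) {
            try (InputStream in = source.open()) {
                Files.copy(in, dest, StandardCopyOption.REPLACE_EXISTING);
                return; // success
            } catch (IOException e) {
                last = e; // the next attempt reopens the stream from the start
            }
        }
        throw last;
    }
}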
Use of java.io.BufferedInputStream in project druid by druid-io.
Class IntermediateLongSupplierSerializer, method makeDelegate.
private void makeDelegate() throws IOException {
    CompressionFactory.LongEncodingWriter writer;
    long delta;
    try {
        delta = LongMath.checkedSubtract(maxVal, minVal);
    } catch (ArithmeticException e) {
        // the value range overflows a long; use a sentinel so delta encoding is skipped
        delta = -1;
    }
    if (uniqueValues.size() <= CompressionFactory.MAX_TABLE_SIZE) {
        writer = new TableLongEncodingWriter(uniqueValues);
    } else if (delta != -1 && delta != Long.MAX_VALUE) {
        writer = new DeltaLongEncodingWriter(minVal, delta);
    } else {
        writer = new LongsLongEncodingWriter(order);
    }
    if (compression == CompressedObjectStrategy.CompressionStrategy.NONE) {
        delegate = new EntireLayoutLongSupplierSerializer(ioPeon, filenameBase, order, writer);
    } else {
        delegate = new BlockLayoutLongSupplierSerializer(ioPeon, filenameBase, order, writer, compression);
    }
    try (DataInputStream tempIn = new DataInputStream(new BufferedInputStream(ioPeon.makeInputStream(tempFile)))) {
        delegate.open();
        while (tempIn.available() > 0) {
            delegate.add(tempIn.readLong());
        }
    }
}
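The checkedSubtract dance guards against maxVal - minVal overflowing a long (e.g. when minVal is Long.MIN_VALUE), in which case delta encoding is impossible and the serializer falls back to writing raw longs. The same overflow-safe check with the JDK's Math.subtractExact, which behaves like Guava's LongMath.checkedSubtract:

public class DeltaCheck {
    // Math.subtractExact is the JDK analogue of Guava's LongMath.checkedSubtract.
    static long safeDelta(long maxVal, long minVal) {
        try {
            return Math.subtractExact(maxVal, minVal); // throws ArithmeticException on overflow
        } catch (ArithmeticException e) {
            return -1; // sentinel: range too wide for delta encoding
        }
    }

    public static void main(String[] args) {
        System.out.println(safeDelta(10, -5));                          // 15: delta encoding viable
        System.out.println(safeDelta(Long.MAX_VALUE, Long.MIN_VALUE)); // -1: overflow, use raw longs
    }
}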