Use of java.io.BufferedOutputStream in project hadoop by apache.
The class BlockReaderRemote, method newBlockReader.
/**
* Create a new BlockReader specifically to satisfy a read.
* This method also sends the OP_READ_BLOCK request.
*
* @param file File location
* @param block The block object
* @param blockToken The block token for security
* @param startOffset The read offset, relative to block head
* @param len The number of bytes to read
* @param verifyChecksum Whether to verify checksum
* @param clientName Client name
* @param peer The Peer to use
* @param datanodeID The DatanodeID this peer is connected to
* @return New BlockReader instance (errors are reported by throwing IOException, not by returning null).
*/
public static BlockReader newBlockReader(String file, ExtendedBlock block,
    Token<BlockTokenIdentifier> blockToken, long startOffset, long len,
    boolean verifyChecksum, String clientName, Peer peer,
    DatanodeID datanodeID, PeerCache peerCache,
    CachingStrategy cachingStrategy, Tracer tracer,
    int networkDistance) throws IOException {
  // in and out will be closed when sock is closed (by the caller)
  final DataOutputStream out = new DataOutputStream(
      new BufferedOutputStream(peer.getOutputStream()));
  new Sender(out).readBlock(block, blockToken, clientName, startOffset, len,
      verifyChecksum, cachingStrategy);
  //
  // Get bytes in block
  //
  DataInputStream in = new DataInputStream(peer.getInputStream());
  BlockOpResponseProto status = BlockOpResponseProto.parseFrom(
      PBHelperClient.vintPrefixed(in));
  checkSuccess(status, peer, block, file);
  ReadOpChecksumInfoProto checksumInfo = status.getReadOpChecksumInfo();
  DataChecksum checksum =
      DataTransferProtoUtil.fromProto(checksumInfo.getChecksum());
  // Warning when we get CHECKSUM_NULL?
  // Read the first chunk offset.
  long firstChunkOffset = checksumInfo.getChunkOffset();
  if (firstChunkOffset < 0 || firstChunkOffset > startOffset ||
      firstChunkOffset <= (startOffset - checksum.getBytesPerChecksum())) {
    throw new IOException("BlockReader: error in first chunk offset (" +
        firstChunkOffset + ") startOffset is " + startOffset +
        " for file " + file);
  }
  return new BlockReaderRemote(file, block.getBlockId(), checksum,
      verifyChecksum, startOffset, firstChunkOffset, len, peer, datanodeID,
      peerCache, tracer, networkDistance);
}
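The stream chain is the detail worth noting: the peer's raw OutputStream is wrapped in a BufferedOutputStream so the many small writes that make up the OP_READ_BLOCK request are coalesced, and a DataOutputStream on top supplies the primitive-writing API. Below is a minimal, self-contained sketch of that same pattern over a plain java.net.Socket; the class name, method, and payload fields are hypothetical illustrations, not Hadoop code.

import java.io.BufferedOutputStream;
import java.io.DataOutputStream;
import java.io.IOException;
import java.net.Socket;

public class BufferedRequestWriter {
  public static void sendRequest(Socket socket, byte[] opHeader,
      long offset, long length) throws IOException {
    // Wrap the raw socket stream so the small header writes below are
    // coalesced in memory instead of each becoming its own network write.
    DataOutputStream out = new DataOutputStream(
        new BufferedOutputStream(socket.getOutputStream()));
    out.write(opHeader);   // opcode / request header bytes (hypothetical)
    out.writeLong(offset); // read offset, as in the request above
    out.writeLong(length); // number of bytes to read
    out.flush();           // push the whole request to the wire at once
  }
}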
Use of java.io.BufferedOutputStream in project hadoop by apache.
The class OfflineImageReconstructor, method run.
/**
* Run the OfflineImageReconstructor.
*
* @param inputPath The input path to use.
* @param outputPath The output path to use.
*
* @throws Exception On error.
*/
public static void run(String inputPath, String outputPath) throws Exception {
  MessageDigest digester = MD5Hash.getDigester();
  FileOutputStream fout = null;
  File foutHash = new File(outputPath + ".md5");
  // delete any .md5 file that exists
  Files.deleteIfExists(foutHash.toPath());
  CountingOutputStream out = null;
  FileInputStream fis = null;
  InputStreamReader reader = null;
  try {
    Files.deleteIfExists(Paths.get(outputPath));
    fout = new FileOutputStream(outputPath);
    fis = new FileInputStream(inputPath);
    reader = new InputStreamReader(fis, Charset.forName("UTF-8"));
    out = new CountingOutputStream(
        new DigestOutputStream(new BufferedOutputStream(fout), digester));
    OfflineImageReconstructor oir = new OfflineImageReconstructor(out, reader);
    oir.processXml();
  } finally {
    IOUtils.cleanup(LOG, reader, fis, out, fout);
  }
  // Write the md5 file
  MD5FileUtils.saveMD5File(new File(outputPath),
      new MD5Hash(digester.digest()));
}
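The interesting chain here is FileOutputStream → BufferedOutputStream → DigestOutputStream: every byte that reaches the file also updates the MD5 digest, so the checksum is computed during the write rather than in a second pass over the output. A minimal sketch of that chain using only the JDK follows; the class and method names are hypothetical, and plain MessageDigest stands in for Hadoop's MD5Hash and CountingOutputStream wrappers.

import java.io.BufferedOutputStream;
import java.io.FileOutputStream;
import java.io.IOException;
import java.security.DigestOutputStream;
import java.security.MessageDigest;
import java.security.NoSuchAlgorithmException;

public class DigestWhileWriting {
  public static byte[] writeWithMd5(String path, byte[] data)
      throws IOException, NoSuchAlgorithmException {
    MessageDigest digester = MessageDigest.getInstance("MD5");
    try (DigestOutputStream out = new DigestOutputStream(
        new BufferedOutputStream(new FileOutputStream(path)), digester)) {
      out.write(data); // each byte is buffered to disk and hashed in one pass
    }
    return digester.digest(); // MD5 of exactly what was written
  }
}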
Use of java.io.BufferedOutputStream in project hadoop by apache.
The class CLI, method viewHistory.
private void viewHistory(String historyFile, boolean all,
    String historyOutFile, String format) throws IOException {
  HistoryViewer historyViewer =
      new HistoryViewer(historyFile, getConf(), all, format);
  PrintStream ps = System.out;
  if (historyOutFile != null) {
    ps = new PrintStream(new BufferedOutputStream(
        new FileOutputStream(new File(historyOutFile))), true, "UTF-8");
  }
  historyViewer.print(ps);
}
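The method defaults to System.out and only builds a buffered, auto-flushing, UTF-8 PrintStream when an output file was requested. A self-contained sketch of that choose-your-sink pattern is below; the class and method names are hypothetical.

import java.io.BufferedOutputStream;
import java.io.File;
import java.io.FileOutputStream;
import java.io.IOException;
import java.io.PrintStream;

public class OptionalFileOutput {
  public static PrintStream openOutput(String outFile) throws IOException {
    if (outFile == null) {
      return System.out; // default sink: the console
    }
    // autoFlush=true and an explicit UTF-8 charset, as in the CLI snippet;
    // the BufferedOutputStream keeps line-by-line printing cheap.
    return new PrintStream(
        new BufferedOutputStream(new FileOutputStream(new File(outFile))),
        true, "UTF-8");
  }
}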
Use of java.io.BufferedOutputStream in project hadoop by apache.
The class JarFinder, method jarDir.
public static void jarDir(File dir, String relativePath, ZipOutputStream zos)
    throws IOException {
  Preconditions.checkNotNull(relativePath, "relativePath");
  Preconditions.checkNotNull(zos, "zos");
  // by JAR spec, if there is a manifest, it must be the first entry in the
  // ZIP.
  File manifestFile = new File(dir, JarFile.MANIFEST_NAME);
  ZipEntry manifestEntry = new ZipEntry(JarFile.MANIFEST_NAME);
  if (!manifestFile.exists()) {
    zos.putNextEntry(manifestEntry);
    new Manifest().write(new BufferedOutputStream(zos));
    zos.closeEntry();
  } else {
    copyToZipStream(manifestFile, manifestEntry, zos);
  }
  zos.closeEntry();
  zipDir(dir, relativePath, zos, true);
  zos.close();
}
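One detail makes the BufferedOutputStream wrapper here safe: in OpenJDK, Manifest.write ends by flushing the DataOutputStream it creates internally, so the buffered bytes reach the ZipOutputStream before closeEntry() runs. A standalone sketch that writes a manifest-first JAR the same way follows; the class and method names are hypothetical.

import java.io.BufferedOutputStream;
import java.io.FileOutputStream;
import java.io.IOException;
import java.util.jar.Attributes;
import java.util.jar.JarFile;
import java.util.jar.Manifest;
import java.util.zip.ZipEntry;
import java.util.zip.ZipOutputStream;

public class ManifestFirstJar {
  public static void writeManifestOnlyJar(String jarPath) throws IOException {
    try (ZipOutputStream zos = new ZipOutputStream(
        new BufferedOutputStream(new FileOutputStream(jarPath)))) {
      // Per the JAR spec, the manifest must be the first entry in the ZIP.
      zos.putNextEntry(new ZipEntry(JarFile.MANIFEST_NAME));
      Manifest manifest = new Manifest();
      manifest.getMainAttributes()
          .put(Attributes.Name.MANIFEST_VERSION, "1.0");
      // Manifest.write flushes internally, so buffering over zos is safe:
      // the buffered bytes are drained before closeEntry() runs.
      manifest.write(new BufferedOutputStream(zos));
      zos.closeEntry();
    }
  }
}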
Use of java.io.BufferedOutputStream in project hadoop by apache.
The class TestLz4CompressorDecompressor, method testCompressorDecopressorLogicWithCompressionStreams.
// Test the compress/decompress round trip through the CompressionOutputStream/CompressionInputStream API.
@Test
public void testCompressorDecopressorLogicWithCompressionStreams() {
  DataOutputStream deflateOut = null;
  DataInputStream inflateIn = null;
  int BYTE_SIZE = 1024 * 100;
  byte[] bytes = generate(BYTE_SIZE);
  int bufferSize = 262144;
  int compressionOverhead = (bufferSize / 6) + 32;
  try {
    DataOutputBuffer compressedDataBuffer = new DataOutputBuffer();
    CompressionOutputStream deflateFilter = new BlockCompressorStream(
        compressedDataBuffer, new Lz4Compressor(bufferSize), bufferSize,
        compressionOverhead);
    deflateOut = new DataOutputStream(new BufferedOutputStream(deflateFilter));
    deflateOut.write(bytes, 0, bytes.length);
    deflateOut.flush();
    deflateFilter.finish();
    DataInputBuffer deCompressedDataBuffer = new DataInputBuffer();
    deCompressedDataBuffer.reset(compressedDataBuffer.getData(), 0,
        compressedDataBuffer.getLength());
    CompressionInputStream inflateFilter = new BlockDecompressorStream(
        deCompressedDataBuffer, new Lz4Decompressor(bufferSize), bufferSize);
    inflateIn = new DataInputStream(new BufferedInputStream(inflateFilter));
    byte[] result = new byte[BYTE_SIZE];
    // readFully, unlike read, guarantees the whole buffer is filled.
    inflateIn.readFully(result);
    // assertArrayEquals takes (message, expected, actual).
    assertArrayEquals("original array not equal to compressed/decompressed array",
        bytes, result);
  } catch (IOException e) {
    fail("testCompressorDecopressorLogicWithCompressionStreams ex error !!!");
  } finally {
    try {
      if (deflateOut != null)
        deflateOut.close();
      if (inflateIn != null)
        inflateIn.close();
    } catch (Exception e) {
      // ignore failures while closing streams during cleanup
    }
  }
}
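The test's shape — write through a buffered compressing stream, finish it, then read back through a buffered decompressing stream and compare — carries over directly to the JDK's zlib streams. Here is a dependency-free sketch of the same round trip, with java.util.zip's Deflater/Inflater streams standing in for Hadoop's LZ4 codec; the class and method names are hypothetical.

import java.io.BufferedInputStream;
import java.io.BufferedOutputStream;
import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.IOException;
import java.util.Arrays;
import java.util.zip.DeflaterOutputStream;
import java.util.zip.InflaterInputStream;

public class CompressionRoundTrip {
  public static boolean roundTrips(byte[] original) throws IOException {
    ByteArrayOutputStream compressed = new ByteArrayOutputStream();
    // Buffer the writes feeding the compressor, as the test above does.
    try (DataOutputStream deflateOut = new DataOutputStream(
        new BufferedOutputStream(new DeflaterOutputStream(compressed)))) {
      deflateOut.write(original, 0, original.length);
    } // close() finishes the deflater and flushes every layer
    byte[] result = new byte[original.length];
    try (DataInputStream inflateIn = new DataInputStream(
        new BufferedInputStream(new InflaterInputStream(
            new ByteArrayInputStream(compressed.toByteArray()))))) {
      inflateIn.readFully(result); // fill the whole buffer, unlike read()
    }
    return Arrays.equals(original, result);
  }
}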