Use of java.io.BufferedOutputStream in project hadoop by apache.
The class TestAliyunOSSFileSystemStore, method writeRenameReadCompare:
protected void writeRenameReadCompare(Path path, long len)
    throws IOException, NoSuchAlgorithmException {
  // If len > fs.oss.multipart.upload.threshold,
  // we'll use a multipart upload copy
  MessageDigest digest = MessageDigest.getInstance("MD5");
  OutputStream out = new BufferedOutputStream(
      new DigestOutputStream(fs.create(path, false), digest));
  for (long i = 0; i < len; i++) {
    out.write('Q');
  }
  out.flush();
  out.close();
  assertTrue("Exists", fs.exists(path));
  Path copyPath = path.suffix(".copy");
  fs.rename(path, copyPath);
  assertTrue("Copy exists", fs.exists(copyPath));
  // Download file from Aliyun OSS and compare the digest against the original
  MessageDigest digest2 = MessageDigest.getInstance("MD5");
  InputStream in = new BufferedInputStream(
      new DigestInputStream(fs.open(copyPath), digest2));
  long copyLen = 0;
  while (in.read() != -1) {
    copyLen++;
  }
  in.close();
  assertEquals("Copy length matches original", len, copyLen);
  assertArrayEquals("Digests match", digest.digest(), digest2.digest());
}
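The test computes the MD5 hash as a side effect of streaming: DigestOutputStream updates the digest for every byte written through it, and DigestInputStream does the same on the read-back. A minimal self-contained sketch of that technique against a local temp file (the temp-file name and the 1024-byte length are illustrative, not part of the Hadoop test):

import java.io.*;
import java.security.*;
import java.util.Arrays;

public class DigestStreamSketch {
    public static void main(String[] args)
            throws IOException, NoSuchAlgorithmException {
        File file = File.createTempFile("digest", ".dat");
        // Write: every byte passing through DigestOutputStream updates the digest
        MessageDigest writeDigest = MessageDigest.getInstance("MD5");
        try (OutputStream out = new BufferedOutputStream(
                new DigestOutputStream(new FileOutputStream(file), writeDigest))) {
            for (int i = 0; i < 1024; i++) {
                out.write('Q');
            }
        }
        // Read back: an independent digest is computed over the same bytes
        MessageDigest readDigest = MessageDigest.getInstance("MD5");
        try (InputStream in = new BufferedInputStream(
                new DigestInputStream(new FileInputStream(file), readDigest))) {
            while (in.read() != -1) {
                // draining the stream is what drives the digest
            }
        }
        System.out.println(Arrays.equals(writeDigest.digest(), readDigest.digest()));
    }
}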
Use of java.io.BufferedOutputStream in project hadoop by apache.
The class RetriableFileCopyCommand, method copyToFile:
private long copyToFile(Path targetPath, FileSystem targetFS,
    CopyListingFileStatus source, long sourceOffset, Mapper.Context context,
    EnumSet<FileAttribute> fileAttributes, final FileChecksum sourceChecksum)
    throws IOException {
  FsPermission permission = FsPermission.getFileDefault()
      .applyUMask(FsPermission.getUMask(targetFS.getConf()));
  final OutputStream outStream;
  if (action == FileAction.OVERWRITE) {
    // Create (or overwrite) the target, preserving replication and block size
    final short repl =
        getReplicationFactor(fileAttributes, source, targetFS, targetPath);
    final long blockSize =
        getBlockSize(fileAttributes, source, targetFS, targetPath);
    FSDataOutputStream out = targetFS.create(targetPath, permission,
        EnumSet.of(CreateFlag.CREATE, CreateFlag.OVERWRITE), BUFFER_SIZE,
        repl, blockSize, context,
        getChecksumOpt(fileAttributes, sourceChecksum));
    outStream = new BufferedOutputStream(out);
  } else {
    // APPEND: resume writing at the current end of the target file
    outStream = new BufferedOutputStream(
        targetFS.append(targetPath, BUFFER_SIZE));
  }
  return copyBytes(source, sourceOffset, outStream, BUFFER_SIZE, context);
}
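copyBytes itself is not shown in this snippet; below is a hedged sketch of the buffered copy loop such a method typically runs (the name, signature, and buffer handling are assumptions for illustration, not DistCp's actual implementation):

// Hypothetical helper, not the real RetriableFileCopyCommand.copyBytes:
// drain the source into the buffered target stream and count the bytes.
static long copyBytesSketch(InputStream in, OutputStream out, int bufferSize)
    throws IOException {
  byte[] buf = new byte[bufferSize];
  long total = 0;
  int read;
  while ((read = in.read(buf)) != -1) {
    out.write(buf, 0, read);  // write exactly the bytes read
    total += read;
  }
  out.flush();
  return total;
}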
Use of java.io.BufferedOutputStream in project hadoop by apache.
The class ITestJets3tNativeFileSystemStore, method writeRenameReadCompare:
protected void writeRenameReadCompare(Path path, long len)
    throws IOException, NoSuchAlgorithmException {
  // If len > fs.s3n.multipart.uploads.block.size,
  // we'll use a multipart upload copy
  MessageDigest digest = MessageDigest.getInstance("MD5");
  OutputStream out = new BufferedOutputStream(
      new DigestOutputStream(fs.create(path, false), digest));
  for (long i = 0; i < len; i++) {
    out.write('Q');
  }
  out.flush();
  out.close();
  assertTrue("Exists", fs.exists(path));
  // Depending on whether this file is over 5 GB or not,
  // rename will cause a multipart upload copy
  Path copyPath = path.suffix(".copy");
  fs.rename(path, copyPath);
  assertTrue("Copy exists", fs.exists(copyPath));
  // Download file from S3 and compare the digest against the original
  MessageDigest digest2 = MessageDigest.getInstance("MD5");
  InputStream in = new BufferedInputStream(
      new DigestInputStream(fs.open(copyPath), digest2));
  long copyLen = 0;
  while (in.read() != -1) {
    copyLen++;
  }
  in.close();
  assertEquals("Copy length matches original", len, copyLen);
  assertArrayEquals("Digests match", digest.digest(), digest2.digest());
}
Use of java.io.BufferedOutputStream in project camel by apache.
The class MllpClientResource, method connect:
public void connect(int connectTimeout) {
    try {
        clientSocket = new Socket();
        clientSocket.connect(new InetSocketAddress(mllpHost, mllpPort), connectTimeout);
        clientSocket.setSoTimeout(soTimeout);
        // Disable SO_LINGER so close() returns immediately
        clientSocket.setSoLinger(false, -1);
        clientSocket.setReuseAddress(reuseAddress);
        clientSocket.setTcpNoDelay(tcpNoDelay);
        inputStream = clientSocket.getInputStream();
        // Buffer outbound writes so frames are sent in few large chunks
        outputStream = new BufferedOutputStream(clientSocket.getOutputStream(), 2048);
    } catch (IOException e) {
        String errorMessage = String.format("Unable to establish connection to %s:%s", mllpHost, mllpPort);
        log.error(errorMessage, e);
        throw new MllpJUnitResourceException(errorMessage, e);
    }
}
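The 2048-byte write buffer matters here because an MLLP frame is assembled from several small writes: a start-of-block byte (0x0b), the HL7 payload, then an end-of-block byte (0x1c) and a carriage return (0x0d). A hedged sketch of sending one framed message over such a stream (the helper name is illustrative and not part of MllpClientResource):

// Illustrative helper: write one MLLP-framed HL7 payload, then flush
// so the whole frame leaves the buffer together.
void sendFramedMessage(OutputStream outputStream, byte[] hl7Payload) throws IOException {
    outputStream.write(0x0b);          // MLLP start-of-block (VT)
    outputStream.write(hl7Payload);    // HL7 message body
    outputStream.write(0x1c);          // MLLP end-of-block (FS)
    outputStream.write(0x0d);          // trailing carriage return
    outputStream.flush();
}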
Use of java.io.BufferedOutputStream in project hadoop by apache.
The class SaslRpcClient, method getOutputStream:
/**
 * Get SASL wrapped OutputStream if SASL QoP requires wrapping,
 * otherwise return original stream. Can be called only after
 * saslConnect() has been called.
 *
 * @param out - OutputStream used to make the connection
 * @return OutputStream that may be using wrapping
 * @throws IOException
 */
public OutputStream getOutputStream(OutputStream out) throws IOException {
  if (useWrap()) {
    // the client and server negotiate a maximum buffer size that can be
    // wrapped
    String maxBuf = (String) saslClient.getNegotiatedProperty(Sasl.RAW_SEND_SIZE);
    out = new BufferedOutputStream(new WrappedOutputStream(out),
        Integer.parseInt(maxBuf));
  }
  return out;
}
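Sasl.RAW_SEND_SIZE is the standard javax.security.sasl property holding the largest raw buffer the peer negotiated to accept, so sizing the BufferedOutputStream to exactly that value means each flush hands the wrapping stream a chunk the server can unwrap. WrappedOutputStream's implementation is not shown in this snippet; here is a hedged sketch of what such a wrapping stream typically does (class name and length-prefix framing are assumptions, not Hadoop's actual code):

import java.io.*;
import javax.security.sasl.SaslClient;

// Hypothetical sketch: buffer raw bytes, then on flush() wrap them into
// a SASL token and length-prefix it so the peer can frame unwrap() calls.
class WrappingOutputStreamSketch extends FilterOutputStream {
    private final SaslClient saslClient;
    private final ByteArrayOutputStream pending = new ByteArrayOutputStream();

    WrappingOutputStreamSketch(OutputStream out, SaslClient saslClient) {
        super(out);
        this.saslClient = saslClient;
    }

    @Override
    public void write(int b) {
        pending.write(b);
    }

    @Override
    public void flush() throws IOException {
        byte[] raw = pending.toByteArray();
        pending.reset();
        // wrap() integrity-protects and/or encrypts the bytes;
        // SaslException is a subclass of IOException
        byte[] token = saslClient.wrap(raw, 0, raw.length);
        new DataOutputStream(out).writeInt(token.length);
        out.write(token);
        out.flush();
    }
}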