Use of java.io.BufferedOutputStream in project elasticsearch by elastic.
The class MockTcpTransport, method sendMessage.
@Override
protected void sendMessage(MockChannel mockChannel, BytesReference reference, Runnable sendListener) throws IOException {
    synchronized (mockChannel) {
        final Socket socket = mockChannel.activeChannel;
        OutputStream outputStream = new BufferedOutputStream(socket.getOutputStream());
        reference.writeTo(outputStream);
        outputStream.flush();
    }
    if (sendListener != null) {
        sendListener.run();
    }
}
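The detail worth noting above is that the BufferedOutputStream is flushed but never closed, since closing it would also close the underlying socket that the mock channel keeps reusing. A minimal sketch of that pattern, with hypothetical class and method names that are not part of the elasticsearch code:

import java.io.BufferedOutputStream;
import java.io.IOException;
import java.io.OutputStream;
import java.net.Socket;

final class SocketSender {

    // Writes one payload through a buffered stream; the socket stays open for reuse.
    static void sendBytes(Socket socket, byte[] payload) throws IOException {
        synchronized (socket) {
            OutputStream out = new BufferedOutputStream(socket.getOutputStream());
            out.write(payload);
            // flush() pushes the buffered bytes onto the socket; the stream is
            // deliberately not closed, because closing it would close the socket.
            out.flush();
        }
    }
}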
Use of java.io.BufferedOutputStream in project buck by facebook.
The class ExopackageSoLoader, method copySoFileIfRequired.
private static File copySoFileIfRequired(String libname) {
    File libraryFile = new File(privateNativeLibsDir, libname + ".so");
    if (libraryFile.exists()) {
        return libraryFile;
    }
    if (!abi1Libraries.containsKey(libname) && !abi2Libraries.containsKey(libname)) {
        return null;
    }
    String abiDir;
    String sourceFilename;
    if (abi1Libraries.containsKey(libname)) {
        sourceFilename = abi1Libraries.get(libname);
        abiDir = Build.CPU_ABI;
    } else {
        sourceFilename = abi2Libraries.get(libname);
        abiDir = Build.CPU_ABI2;
    }
    String sourcePath = nativeLibsDir + abiDir + "/" + sourceFilename;
    try {
        InputStream in = null;
        OutputStream out = null;
        try {
            in = new BufferedInputStream(new FileInputStream(sourcePath));
            out = new BufferedOutputStream(new FileOutputStream(libraryFile));
            byte[] buffer = new byte[4 * 1024];
            int len;
            while ((len = in.read(buffer)) > 0) {
                out.write(buffer, 0, len);
            }
        } finally {
            if (in != null) {
                in.close();
            }
            if (out != null) {
                out.close();
            }
        }
    } catch (IOException e) {
        throw new RuntimeException(e);
    }
    return libraryFile;
}
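The nested try/finally with null checks is the pre-Java 7 way to guarantee both streams are closed. On Java 7 and later (including current Android API levels) the same copy can be written with try-with-resources; the sketch below is an alternative formulation, not how buck itself does it, and the class and method names are placeholders:

import java.io.BufferedInputStream;
import java.io.BufferedOutputStream;
import java.io.File;
import java.io.FileInputStream;
import java.io.FileOutputStream;
import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;

final class FileCopy {

    // Copies sourcePath to destination; both streams are closed automatically,
    // even if the copy fails partway through.
    static void copyFile(String sourcePath, File destination) throws IOException {
        try (InputStream in = new BufferedInputStream(new FileInputStream(sourcePath));
             OutputStream out = new BufferedOutputStream(new FileOutputStream(destination))) {
            byte[] buffer = new byte[4 * 1024];
            int len;
            while ((len = in.read(buffer)) > 0) {
                out.write(buffer, 0, len);
            }
        }
    }
}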
Use of java.io.BufferedOutputStream in project cogtool by cogtool.
The class ZipUtil, method unzip.
/**
* Unzips a zipfile to a destination directory.
* @param zip the file to unzip
* @param fileDir the destination directory for zipfile contents
* @throws FileNotFoundException
* @throws IOException
*/
public static void unzip(ZipFile zip, File fileDir) throws FileNotFoundException, IOException {
    // Read out all entries from ZipFile via input streams
    for (Enumeration<? extends ZipEntry> en = zip.entries(); en.hasMoreElements(); ) {
        ZipEntry ze = en.nextElement();
        // Get info from file entry
        long size = ze.getSize();
        // Create File in fileDir for unpacked entry
        String name = ze.getName();
        // System.out.println("Unzipping: " + name);
        File zeFile = new File(fileDir, name);
        // Check for a trailing slash to see if this is a directory entry
        if (name.charAt(name.length() - 1) == '/') {
            // If this entry is a directory, make it
            zeFile.mkdirs();
        } else {
            // If this entry is a file, make its parent directories, then the file itself
            zeFile.getParentFile().mkdirs();
            zeFile.createNewFile();
            // Create an OutputStream to the new file
            FileOutputStream fout = null;
            OutputStream out = null;
            // Get ZipInputStream for reading data
            InputStream zin = null;
            try {
                fout = new FileOutputStream(zeFile);
                out = new BufferedOutputStream(fout);
                zin = zip.getInputStream(ze);
                // Set modification time
                zeFile.setLastModified(ze.getTime());
                // Copy data from zin to out, 100k at a time
                int chunkSize = 100 * 1024;
                byte[] buff = new byte[chunkSize];
                int len = chunkSize;
                for (; size > 0; size -= len) {
                    if (size < chunkSize) {
                        len = (int) size;
                    } else {
                        len = chunkSize;
                    }
                    int actualBytes = 0;
                    int off = 0;
                    do {
                        actualBytes = zin.read(buff, off, len);
                        if (actualBytes == -1) {
                            out.write(buff, off, len);
                            // System.out.print("!" + len + ':' + actualBytes + ':' + off + ' ');
                            throw new RuntimeException("Bad math in unzip!");
                        } else {
                            out.write(buff, off, actualBytes);
                            // System.out.print("" + len + ':' + actualBytes + ':' + off + ' ');
                        }
                        len -= actualBytes;
                        size -= actualBytes;
                        off += actualBytes;
                    } while (len > 0);
                }
            } finally {
                // Close the streams
                if (fout != null) {
                    if (out != null) {
                        if (zin != null) {
                            zin.close();
                        }
                        out.close();
                    }
                    fout.close();
                }
            }
        }
    }
}
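The inner loop above tracks the entry size reported by the zip header and throws if the stream ends early. A simpler variant, sketched below under the assumption that reading until end-of-stream is acceptable, copies each entry without the size bookkeeping (the class and method names are hypothetical, not part of cogtool):

import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;

final class StreamCopy {

    // Copies everything from in to out in 100 KB chunks until end-of-stream.
    static void copyAll(InputStream in, OutputStream out) throws IOException {
        byte[] buff = new byte[100 * 1024];
        int read;
        while ((read = in.read(buff)) != -1) {
            out.write(buff, 0, read);
        }
    }
}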
Use of java.io.BufferedOutputStream in project cw-omnibus by commonsguy.
The class Downloader, method onHandleIntent.
@Override
public void onHandleIntent(Intent i) {
    String filename = i.getData().getLastPathSegment();
    startForeground(FOREGROUND_ID, buildForegroundNotification(filename));
    try {
        File output = new File(getFilesDir(), filename);
        if (output.exists()) {
            output.delete();
        }
        URL url = new URL(i.getData().toString());
        HttpURLConnection c = (HttpURLConnection) url.openConnection();
        FileOutputStream fos = new FileOutputStream(output.getPath());
        BufferedOutputStream out = new BufferedOutputStream(fos);
        try {
            InputStream in = c.getInputStream();
            byte[] buffer = new byte[8192];
            int len = 0;
            while ((len = in.read(buffer)) >= 0) {
                out.write(buffer, 0, len);
            }
            out.flush();
        } finally {
            fos.getFD().sync();
            out.close();
            c.disconnect();
        }
        raiseNotification(i, output, null);
    } catch (IOException e2) {
        raiseNotification(i, null, e2);
    } finally {
        stopForeground(true);
    }
}
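Note that the finally block flushes and syncs the output side but never closes the connection's InputStream directly; disconnect() releases it here. A hedged sketch of the same download loop with both streams managed by try-with-resources (class and method names are placeholders, not part of cw-omnibus):

import java.io.BufferedOutputStream;
import java.io.FileOutputStream;
import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
import java.net.HttpURLConnection;

final class DownloadCopy {

    // Streams the response body into outputPath; both streams are closed
    // automatically, and the connection is released afterwards.
    static void download(HttpURLConnection c, String outputPath) throws IOException {
        try (InputStream in = c.getInputStream();
             OutputStream out = new BufferedOutputStream(new FileOutputStream(outputPath))) {
            byte[] buffer = new byte[8192];
            int len;
            while ((len = in.read(buffer)) >= 0) {
                out.write(buffer, 0, len);
            }
            out.flush();
        } finally {
            c.disconnect();
        }
    }
}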
Use of java.io.BufferedOutputStream in project elasticsearch by elastic.
The class RecoverySourceHandler, method phase1.
/**
* Perform phase1 of the recovery operations. Once this {@link IndexCommit}
* snapshot has been performed no commit operations (files being fsync'd)
* are effectively allowed on this index until all recovery phases are done
* <p>
* Phase1 examines the segment files on the target node and copies over the
* segments that are missing. Only segments that have the same size and
* checksum can be reused
*/
public void phase1(final IndexCommit snapshot, final Translog.View translogView) {
    cancellableThreads.checkForCancel();
    // Total size of segment files that are recovered
    long totalSize = 0;
    // Total size of segment files that were able to be re-used
    long existingTotalSize = 0;
    final Store store = shard.store();
    store.incRef();
    try {
        StopWatch stopWatch = new StopWatch().start();
        final Store.MetadataSnapshot recoverySourceMetadata;
        try {
            recoverySourceMetadata = store.getMetadata(snapshot);
        } catch (CorruptIndexException | IndexFormatTooOldException | IndexFormatTooNewException ex) {
            shard.failShard("recovery", ex);
            throw ex;
        }
        for (String name : snapshot.getFileNames()) {
            final StoreFileMetaData md = recoverySourceMetadata.get(name);
            if (md == null) {
                logger.info("Snapshot differs from actual index for file: {} meta: {}", name, recoverySourceMetadata.asMap());
                throw new CorruptIndexException("Snapshot differs from actual index - maybe index was removed metadata has " + recoverySourceMetadata.asMap().size() + " files", name);
            }
        }
        // Generate a "diff" of all the identical, different, and missing
        // segment files on the target node, using the existing files on
        // the source node
        String recoverySourceSyncId = recoverySourceMetadata.getSyncId();
        String recoveryTargetSyncId = request.metadataSnapshot().getSyncId();
        final boolean recoverWithSyncId = recoverySourceSyncId != null && recoverySourceSyncId.equals(recoveryTargetSyncId);
        if (recoverWithSyncId) {
            final long numDocsTarget = request.metadataSnapshot().getNumDocs();
            final long numDocsSource = recoverySourceMetadata.getNumDocs();
            if (numDocsTarget != numDocsSource) {
                throw new IllegalStateException("try to recover " + request.shardId() + " from primary shard with sync id but number " + "of docs differ: " + numDocsSource + " (" + request.sourceNode().getName() + ", primary) vs " + numDocsTarget + "(" + request.targetNode().getName() + ")");
            }
            // we shortcut recovery here because we have nothing to copy. but we must still start the engine on the target.
            // so we don't return here
            logger.trace("skipping [phase1]- identical sync id [{}] found on both source and target", recoverySourceSyncId);
        } else {
            final Store.RecoveryDiff diff = recoverySourceMetadata.recoveryDiff(request.metadataSnapshot());
            for (StoreFileMetaData md : diff.identical) {
                response.phase1ExistingFileNames.add(md.name());
                response.phase1ExistingFileSizes.add(md.length());
                existingTotalSize += md.length();
                if (logger.isTraceEnabled()) {
                    logger.trace("recovery [phase1]: not recovering [{}], exist in local store and has checksum [{}]," + " size [{}]", md.name(), md.checksum(), md.length());
                }
                totalSize += md.length();
            }
            List<StoreFileMetaData> phase1Files = new ArrayList<>(diff.different.size() + diff.missing.size());
            phase1Files.addAll(diff.different);
            phase1Files.addAll(diff.missing);
            for (StoreFileMetaData md : phase1Files) {
                if (request.metadataSnapshot().asMap().containsKey(md.name())) {
                    logger.trace("recovery [phase1]: recovering [{}], exists in local store, but is different: remote [{}], local [{}]", md.name(), request.metadataSnapshot().asMap().get(md.name()), md);
                } else {
                    logger.trace("recovery [phase1]: recovering [{}], does not exist in remote", md.name());
                }
                response.phase1FileNames.add(md.name());
                response.phase1FileSizes.add(md.length());
                totalSize += md.length();
            }
            response.phase1TotalSize = totalSize;
            response.phase1ExistingTotalSize = existingTotalSize;
            logger.trace("recovery [phase1]: recovering_files [{}] with total_size [{}], reusing_files [{}] with total_size [{}]", response.phase1FileNames.size(), new ByteSizeValue(totalSize), response.phase1ExistingFileNames.size(), new ByteSizeValue(existingTotalSize));
            cancellableThreads.execute(() -> recoveryTarget.receiveFileInfo(response.phase1FileNames, response.phase1FileSizes, response.phase1ExistingFileNames, response.phase1ExistingFileSizes, translogView.totalOperations()));
            // How many bytes we've copied since we last called RateLimiter.pause
            final Function<StoreFileMetaData, OutputStream> outputStreamFactories = md -> new BufferedOutputStream(new RecoveryOutputStream(md, translogView), chunkSizeInBytes);
            sendFiles(store, phase1Files.toArray(new StoreFileMetaData[phase1Files.size()]), outputStreamFactories);
            // are deleted
            try {
                cancellableThreads.executeIO(() -> recoveryTarget.cleanFiles(translogView.totalOperations(), recoverySourceMetadata));
            } catch (RemoteTransportException | IOException targetException) {
                final IOException corruptIndexException;
                // - maybe due to old segments without checksums or length only checks
                if ((corruptIndexException = ExceptionsHelper.unwrapCorruption(targetException)) != null) {
                    try {
                        final Store.MetadataSnapshot recoverySourceMetadata1 = store.getMetadata(snapshot);
                        StoreFileMetaData[] metadata = StreamSupport.stream(recoverySourceMetadata1.spliterator(), false).toArray(size -> new StoreFileMetaData[size]);
                        ArrayUtil.timSort(metadata, (o1, o2) -> {
                            // check small files first
                            return Long.compare(o1.length(), o2.length());
                        });
                        for (StoreFileMetaData md : metadata) {
                            cancellableThreads.checkForCancel();
                            logger.debug("checking integrity for file {} after remove corruption exception", md);
                            if (store.checkIntegrityNoException(md) == false) {
                                // we are corrupted on the primary -- fail!
                                shard.failShard("recovery", corruptIndexException);
                                logger.warn("Corrupted file detected {} checksum mismatch", md);
                                throw corruptIndexException;
                            }
                        }
                    } catch (IOException ex) {
                        targetException.addSuppressed(ex);
                        throw targetException;
                    }
                    // corruption has happened on the way to replica
                    RemoteTransportException exception = new RemoteTransportException("File corruption occurred on recovery but " + "checksums are ok", null);
                    exception.addSuppressed(targetException);
                    logger.warn((org.apache.logging.log4j.util.Supplier<?>) () -> new ParameterizedMessage("{} Remote file corruption during finalization of recovery on node {}. local checksum OK", shard.shardId(), request.targetNode()), corruptIndexException);
                    throw exception;
                } else {
                    throw targetException;
                }
            }
        }
        logger.trace("recovery [phase1]: took [{}]", stopWatch.totalTime());
        response.phase1Time = stopWatch.totalTime().millis();
    } catch (Exception e) {
        throw new RecoverFilesRecoveryException(request.shardId(), response.phase1FileNames.size(), new ByteSizeValue(totalSize), e);
    } finally {
        store.decRef();
    }
}
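The outputStreamFactories line above is the BufferedOutputStream usage of interest: it uses the two-argument constructor to size the internal buffer to the recovery chunk size, so writes are batched at that granularity. A standalone illustration of that constructor is sketched below; the sink, the class name, and the chunk size are arbitrary choices for the example, not elasticsearch values:

import java.io.BufferedOutputStream;
import java.io.ByteArrayOutputStream;
import java.io.IOException;
import java.io.OutputStream;

final class ChunkedBuffering {

    // Buffers writes in memory and forwards them to the sink in buffer-sized batches.
    static byte[] writeChunked(byte[] data) throws IOException {
        int chunkSizeInBytes = 512 * 1024; // illustrative value, not the elasticsearch setting
        ByteArrayOutputStream sink = new ByteArrayOutputStream();
        // The second constructor argument sets the internal buffer size explicitly.
        try (OutputStream out = new BufferedOutputStream(sink, chunkSizeInBytes)) {
            out.write(data);
        }
        return sink.toByteArray();
    }
}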