
Example 1 with BufferedOutputStream

Use of java.io.BufferedOutputStream in project elasticsearch by elastic.

The class MockTcpTransport, method sendMessage.

@Override
protected void sendMessage(MockChannel mockChannel, BytesReference reference, Runnable sendListener) throws IOException {
    synchronized (mockChannel) {
        final Socket socket = mockChannel.activeChannel;
        OutputStream outputStream = new BufferedOutputStream(socket.getOutputStream());
        reference.writeTo(outputStream);
        outputStream.flush();
    }
    if (sendListener != null) {
        sendListener.run();
    }
}
Also used : BufferedOutputStream (java.io.BufferedOutputStream), OutputStream (java.io.OutputStream), Socket (java.net.Socket), ServerSocket (java.net.ServerSocket), MockSocket (org.elasticsearch.mocksocket.MockSocket), MockServerSocket (org.elasticsearch.mocksocket.MockServerSocket)
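
The detail worth noting is that the stream is flushed but never closed: closing the BufferedOutputStream would close the underlying socket, which stays open across messages. A minimal sketch of the same pattern in isolation (SocketWriter and its payload are hypothetical, not part of the Elasticsearch code):

import java.io.BufferedOutputStream;
import java.io.IOException;
import java.io.OutputStream;
import java.net.Socket;

final class SocketWriter {
    // Buffer many small writes into fewer socket writes, then flush once.
    static void send(Socket socket, byte[] payload) throws IOException {
        OutputStream out = new BufferedOutputStream(socket.getOutputStream());
        out.write(payload);
        out.flush(); // flush, but do not close: closing would close the socket too
    }
}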

Example 2 with BufferedOutputStream

Use of java.io.BufferedOutputStream in project buck by facebook.

The class ExopackageSoLoader, method copySoFileIfRequired.

private static File copySoFileIfRequired(String libname) {
    File libraryFile = new File(privateNativeLibsDir, libname + ".so");
    if (libraryFile.exists()) {
        return libraryFile;
    }
    if (!abi1Libraries.containsKey(libname) && !abi2Libraries.containsKey(libname)) {
        return null;
    }
    String abiDir;
    String sourceFilename;
    if (abi1Libraries.containsKey(libname)) {
        sourceFilename = abi1Libraries.get(libname);
        abiDir = Build.CPU_ABI;
    } else {
        sourceFilename = abi2Libraries.get(libname);
        abiDir = Build.CPU_ABI2;
    }
    String sourcePath = nativeLibsDir + abiDir + "/" + sourceFilename;
    try {
        InputStream in = null;
        OutputStream out = null;
        try {
            in = new BufferedInputStream(new FileInputStream(sourcePath));
            out = new BufferedOutputStream(new FileOutputStream(libraryFile));
            byte[] buffer = new byte[4 * 1024];
            int len;
            while ((len = in.read(buffer)) > 0) {
                out.write(buffer, 0, len);
            }
        } finally {
            if (in != null) {
                in.close();
            }
            if (out != null) {
                out.close();
            }
        }
    } catch (IOException e) {
        throw new RuntimeException(e);
    }
    return libraryFile;
}
Also used : BufferedInputStream (java.io.BufferedInputStream), FileInputStream (java.io.FileInputStream), InputStream (java.io.InputStream), OutputStream (java.io.OutputStream), FileOutputStream (java.io.FileOutputStream), BufferedOutputStream (java.io.BufferedOutputStream), IOException (java.io.IOException), File (java.io.File)
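
The copy loop above predates try-with-resources and closes the streams by hand; note that if in.close() throws, out is never closed. A sketch of the same copy with try-with-resources, which closes both streams even on failure (CopyExample is illustrative, not Buck code):

import java.io.BufferedInputStream;
import java.io.BufferedOutputStream;
import java.io.FileInputStream;
import java.io.FileOutputStream;
import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;

final class CopyExample {
    // Copies sourcePath to destPath with buffered streams; both streams are
    // closed automatically even if an IOException is thrown mid-copy.
    static void copy(String sourcePath, String destPath) throws IOException {
        try (InputStream in = new BufferedInputStream(new FileInputStream(sourcePath));
             OutputStream out = new BufferedOutputStream(new FileOutputStream(destPath))) {
            byte[] buffer = new byte[4 * 1024];
            int len;
            while ((len = in.read(buffer)) > 0) {
                out.write(buffer, 0, len);
            }
        }
    }
}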

Example 3 with BufferedOutputStream

Use of java.io.BufferedOutputStream in project cogtool by cogtool.

The class ZipUtil, method unzip.

/**
 * Unzips a zipfile to a destination directory.
 *
 * @param zip the file to unzip
 * @param fileDir the destination directory for zipfile contents
 * @throws FileNotFoundException
 * @throws IOException
 */
public static void unzip(ZipFile zip, File fileDir) throws FileNotFoundException, IOException {
    // Read out all entries from ZipFile via input streams
    for (Enumeration<? extends ZipEntry> en = zip.entries(); en.hasMoreElements(); ) {
        ZipEntry ze = en.nextElement();
        // Get info from file entry
        long size = ze.getSize();
        // Create File in fileDir for unpacked entry
        String name = ze.getName();
        //      System.out.println("Unzipping: " + name);
        File zeFile = new File(fileDir, name);
        // Check for a trailing slash to see if this is a directory entry
        if (name.charAt(name.length() - 1) == '/') {
            // If this entry is a directory, make it
            zeFile.mkdirs();
        } else {
            // if this entry is a file, make its parent directories, then it
            zeFile.getParentFile().mkdirs();
            zeFile.createNewFile();
            // Create a buffered OutputStream to the new file
            FileOutputStream fout = null;
            OutputStream out = null;
            // Get ZipInputStream for reading data
            InputStream zin = null;
            try {
                fout = new FileOutputStream(zeFile);
                out = new BufferedOutputStream(fout);
                zin = zip.getInputStream(ze);
                // Set modification time
                zeFile.setLastModified(ze.getTime());
                // Copy data from zin to out, 100k at a time
                int chunkSize = 100 * 1024;
                byte[] buff = new byte[chunkSize];
                int len = chunkSize;
                for (; size > 0; size -= len) {
                    if (size < chunkSize) {
                        len = (int) size;
                    } else {
                        len = chunkSize;
                    }
                    int actualBytes = 0;
                    int off = 0;
                    do {
                        actualBytes = zin.read(buff, off, len);
                        if (actualBytes == -1) {
                            out.write(buff, off, len);
                            //                System.out.print("!" + len + ':' + actualBytes + ':' + off + ' ');
                            throw new RuntimeException("Bad math in unzip!");
                        } else {
                            out.write(buff, off, actualBytes);
                        //                System.out.print("" + len + ':' + actualBytes + ':' + off + ' ');
                        }
                        len -= actualBytes;
                        size -= actualBytes;
                        off += actualBytes;
                    } while ((len > 0));
                }
            } finally {
                // Close the streams
                if (fout != null) {
                    if (out != null) {
                        if (zin != null) {
                            zin.close();
                        }
                        out.close();
                    }
                    fout.close();
                }
            }
        }
    }
}
Also used : FileInputStream (java.io.FileInputStream), InputStream (java.io.InputStream), ZipEntry (java.util.zip.ZipEntry), FileOutputStream (java.io.FileOutputStream), OutputStream (java.io.OutputStream), ZipOutputStream (java.util.zip.ZipOutputStream), BufferedOutputStream (java.io.BufferedOutputStream), File (java.io.File), ZipFile (java.util.zip.ZipFile)
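
The size-tracking arithmetic above is fragile enough that the method throws "Bad math in unzip!" when its own accounting breaks. A simpler sketch that reads each entry to end-of-stream instead of trusting the declared entry size (SimpleUnzip is illustrative, not the cogtool implementation):

import java.io.BufferedOutputStream;
import java.io.File;
import java.io.FileOutputStream;
import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
import java.util.Enumeration;
import java.util.zip.ZipEntry;
import java.util.zip.ZipFile;

final class SimpleUnzip {
    // Extracts every entry of zip into fileDir, reading each entry's
    // stream until EOF rather than tracking the declared entry size.
    static void unzip(ZipFile zip, File fileDir) throws IOException {
        for (Enumeration<? extends ZipEntry> en = zip.entries(); en.hasMoreElements(); ) {
            ZipEntry ze = en.nextElement();
            File zeFile = new File(fileDir, ze.getName());
            if (ze.isDirectory()) {
                zeFile.mkdirs();
                continue;
            }
            zeFile.getParentFile().mkdirs();
            try (InputStream zin = zip.getInputStream(ze);
                 OutputStream out = new BufferedOutputStream(new FileOutputStream(zeFile))) {
                byte[] buff = new byte[8 * 1024];
                int len;
                while ((len = zin.read(buff)) > 0) {
                    out.write(buff, 0, len);
                }
            }
            if (ze.getTime() >= 0) {
                zeFile.setLastModified(ze.getTime()); // preserve the entry's timestamp
            }
        }
    }
}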

Example 4 with BufferedOutputStream

Use of java.io.BufferedOutputStream in project cw-omnibus by commonsguy.

The class Downloader, method onHandleIntent.

@Override
public void onHandleIntent(Intent i) {
    String filename = i.getData().getLastPathSegment();
    startForeground(FOREGROUND_ID, buildForegroundNotification(filename));
    try {
        File output = new File(getFilesDir(), filename);
        if (output.exists()) {
            output.delete();
        }
        URL url = new URL(i.getData().toString());
        HttpURLConnection c = (HttpURLConnection) url.openConnection();
        FileOutputStream fos = new FileOutputStream(output.getPath());
        BufferedOutputStream out = new BufferedOutputStream(fos);
        try {
            InputStream in = c.getInputStream();
            byte[] buffer = new byte[8192];
            int len = 0;
            while ((len = in.read(buffer)) >= 0) {
                out.write(buffer, 0, len);
            }
            out.flush();
        } finally {
            fos.getFD().sync();
            out.close();
            c.disconnect();
        }
        raiseNotification(i, output, null);
    } catch (IOException e2) {
        raiseNotification(i, null, e2);
    } finally {
        stopForeground(true);
    }
}
Also used : HttpURLConnection (java.net.HttpURLConnection), InputStream (java.io.InputStream), FileOutputStream (java.io.FileOutputStream), IOException (java.io.IOException), File (java.io.File), BufferedOutputStream (java.io.BufferedOutputStream), URL (java.net.URL)
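
The notable line is fos.getFD().sync(): after the BufferedOutputStream is flushed, the file descriptor is forced to stable storage, so a crash just after the download cannot silently leave a truncated file. A sketch of the same flush-then-sync sequence with try-with-resources (DownloadSketch is illustrative; the foreground-service and notification plumbing is omitted):

import java.io.BufferedOutputStream;
import java.io.File;
import java.io.FileOutputStream;
import java.io.IOException;
import java.io.InputStream;
import java.net.HttpURLConnection;
import java.net.URL;

final class DownloadSketch {
    // Streams a URL to a file; flushes the buffer, then forces the bytes
    // to disk before the streams are closed.
    static void download(URL url, File output) throws IOException {
        HttpURLConnection c = (HttpURLConnection) url.openConnection();
        try (InputStream in = c.getInputStream();
             FileOutputStream fos = new FileOutputStream(output);
             BufferedOutputStream out = new BufferedOutputStream(fos)) {
            byte[] buffer = new byte[8192];
            int len;
            while ((len = in.read(buffer)) >= 0) {
                out.write(buffer, 0, len);
            }
            out.flush();          // push buffered bytes down to the FileOutputStream
            fos.getFD().sync();   // force them to stable storage
        } finally {
            c.disconnect();
        }
    }
}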

Example 5 with BufferedOutputStream

Use of java.io.BufferedOutputStream in project elasticsearch by elastic.

The class RecoverySourceHandler, method phase1.

/**
 * Perform phase1 of the recovery operations. Once this {@link IndexCommit}
 * snapshot has been performed no commit operations (files being fsync'd)
 * are effectively allowed on this index until all recovery phases are done.
 * <p>
 * Phase1 examines the segment files on the target node and copies over the
 * segments that are missing. Only segments that have the same size and
 * checksum can be reused.
 */
public void phase1(final IndexCommit snapshot, final Translog.View translogView) {
    cancellableThreads.checkForCancel();
    // Total size of segment files that are recovered
    long totalSize = 0;
    // Total size of segment files that were able to be re-used
    long existingTotalSize = 0;
    final Store store = shard.store();
    store.incRef();
    try {
        StopWatch stopWatch = new StopWatch().start();
        final Store.MetadataSnapshot recoverySourceMetadata;
        try {
            recoverySourceMetadata = store.getMetadata(snapshot);
        } catch (CorruptIndexException | IndexFormatTooOldException | IndexFormatTooNewException ex) {
            shard.failShard("recovery", ex);
            throw ex;
        }
        for (String name : snapshot.getFileNames()) {
            final StoreFileMetaData md = recoverySourceMetadata.get(name);
            if (md == null) {
                logger.info("Snapshot differs from actual index for file: {} meta: {}", name, recoverySourceMetadata.asMap());
                throw new CorruptIndexException("Snapshot differs from actual index - maybe index was removed metadata has " + recoverySourceMetadata.asMap().size() + " files", name);
            }
        }
        // Generate a "diff" of all the identical, different, and missing
        // segment files on the target node, using the existing files on
        // the source node
        String recoverySourceSyncId = recoverySourceMetadata.getSyncId();
        String recoveryTargetSyncId = request.metadataSnapshot().getSyncId();
        final boolean recoverWithSyncId = recoverySourceSyncId != null && recoverySourceSyncId.equals(recoveryTargetSyncId);
        if (recoverWithSyncId) {
            final long numDocsTarget = request.metadataSnapshot().getNumDocs();
            final long numDocsSource = recoverySourceMetadata.getNumDocs();
            if (numDocsTarget != numDocsSource) {
                throw new IllegalStateException("try to recover " + request.shardId() + " from primary shard with sync id but number " + "of docs differ: " + numDocsSource + " (" + request.sourceNode().getName() + ", primary) vs " + numDocsTarget + "(" + request.targetNode().getName() + ")");
            }
            // we shortcut recovery here because we have nothing to copy. but we must still start the engine on the target.
            // so we don't return here
            logger.trace("skipping [phase1]- identical sync id [{}] found on both source and target", recoverySourceSyncId);
        } else {
            final Store.RecoveryDiff diff = recoverySourceMetadata.recoveryDiff(request.metadataSnapshot());
            for (StoreFileMetaData md : diff.identical) {
                response.phase1ExistingFileNames.add(md.name());
                response.phase1ExistingFileSizes.add(md.length());
                existingTotalSize += md.length();
                if (logger.isTraceEnabled()) {
                    logger.trace("recovery [phase1]: not recovering [{}], exist in local store and has checksum [{}]," + " size [{}]", md.name(), md.checksum(), md.length());
                }
                totalSize += md.length();
            }
            List<StoreFileMetaData> phase1Files = new ArrayList<>(diff.different.size() + diff.missing.size());
            phase1Files.addAll(diff.different);
            phase1Files.addAll(diff.missing);
            for (StoreFileMetaData md : phase1Files) {
                if (request.metadataSnapshot().asMap().containsKey(md.name())) {
                    logger.trace("recovery [phase1]: recovering [{}], exists in local store, but is different: remote [{}], local [{}]", md.name(), request.metadataSnapshot().asMap().get(md.name()), md);
                } else {
                    logger.trace("recovery [phase1]: recovering [{}], does not exist in remote", md.name());
                }
                response.phase1FileNames.add(md.name());
                response.phase1FileSizes.add(md.length());
                totalSize += md.length();
            }
            response.phase1TotalSize = totalSize;
            response.phase1ExistingTotalSize = existingTotalSize;
            logger.trace("recovery [phase1]: recovering_files [{}] with total_size [{}], reusing_files [{}] with total_size [{}]", response.phase1FileNames.size(), new ByteSizeValue(totalSize), response.phase1ExistingFileNames.size(), new ByteSizeValue(existingTotalSize));
            cancellableThreads.execute(() -> recoveryTarget.receiveFileInfo(response.phase1FileNames, response.phase1FileSizes, response.phase1ExistingFileNames, response.phase1ExistingFileSizes, translogView.totalOperations()));
            // How many bytes we've copied since we last called RateLimiter.pause
            final Function<StoreFileMetaData, OutputStream> outputStreamFactories = md -> new BufferedOutputStream(new RecoveryOutputStream(md, translogView), chunkSizeInBytes);
            sendFiles(store, phase1Files.toArray(new StoreFileMetaData[phase1Files.size()]), outputStreamFactories);
            // clean the target store: files that are not part of this recovery are deleted
            try {
                cancellableThreads.executeIO(() -> recoveryTarget.cleanFiles(translogView.totalOperations(), recoverySourceMetadata));
            } catch (RemoteTransportException | IOException targetException) {
                final IOException corruptIndexException;
                // the target reported corruption, maybe due to old segments without checksums or with length-only checks
                if ((corruptIndexException = ExceptionsHelper.unwrapCorruption(targetException)) != null) {
                    try {
                        final Store.MetadataSnapshot recoverySourceMetadata1 = store.getMetadata(snapshot);
                        StoreFileMetaData[] metadata = StreamSupport.stream(recoverySourceMetadata1.spliterator(), false).toArray(size -> new StoreFileMetaData[size]);
                        ArrayUtil.timSort(metadata, (o1, o2) -> {
                            // check small files first
                            return Long.compare(o1.length(), o2.length());
                        });
                        for (StoreFileMetaData md : metadata) {
                            cancellableThreads.checkForCancel();
                            logger.debug("checking integrity for file {} after remove corruption exception", md);
                            if (store.checkIntegrityNoException(md) == false) {
                                // we are corrupted on the primary -- fail!
                                shard.failShard("recovery", corruptIndexException);
                                logger.warn("Corrupted file detected {} checksum mismatch", md);
                                throw corruptIndexException;
                            }
                        }
                    } catch (IOException ex) {
                        targetException.addSuppressed(ex);
                        throw targetException;
                    }
                    // corruption has happened on the way to replica
                    RemoteTransportException exception = new RemoteTransportException("File corruption occurred on recovery but " + "checksums are ok", null);
                    exception.addSuppressed(targetException);
                    logger.warn((org.apache.logging.log4j.util.Supplier<?>) () -> new ParameterizedMessage("{} Remote file corruption during finalization of recovery on node {}. local checksum OK", shard.shardId(), request.targetNode()), corruptIndexException);
                    throw exception;
                } else {
                    throw targetException;
                }
            }
        }
        logger.trace("recovery [phase1]: took [{}]", stopWatch.totalTime());
        response.phase1Time = stopWatch.totalTime().millis();
    } catch (Exception e) {
        throw new RecoverFilesRecoveryException(request.shardId(), response.phase1FileNames.size(), new ByteSizeValue(totalSize), e);
    } finally {
        store.decRef();
    }
}
Also used : IndexCommit (org.apache.lucene.index.IndexCommit), CancellableThreads (org.elasticsearch.common.util.CancellableThreads), IndexShardClosedException (org.elasticsearch.index.shard.IndexShardClosedException), IndexFormatTooNewException (org.apache.lucene.index.IndexFormatTooNewException), Nullable (org.elasticsearch.common.Nullable), IndexShardRelocatedException (org.elasticsearch.index.shard.IndexShardRelocatedException), RecoveryEngineException (org.elasticsearch.index.engine.RecoveryEngineException), ParameterizedMessage (org.apache.logging.log4j.message.ParameterizedMessage), Function (java.util.function.Function), Supplier (java.util.function.Supplier), CorruptIndexException (org.apache.lucene.index.CorruptIndexException), BufferedOutputStream (java.io.BufferedOutputStream), ArrayList (java.util.ArrayList), BytesArray (org.elasticsearch.common.bytes.BytesArray), StoreFileMetaData (org.elasticsearch.index.store.StoreFileMetaData), Settings (org.elasticsearch.common.settings.Settings), SequenceNumbersService (org.elasticsearch.index.seqno.SequenceNumbersService), Store (org.elasticsearch.index.store.Store), LocalCheckpointTracker (org.elasticsearch.index.seqno.LocalCheckpointTracker), Streams (org.elasticsearch.common.io.Streams), StreamSupport (java.util.stream.StreamSupport), IOContext (org.apache.lucene.store.IOContext), Releasable (org.elasticsearch.common.lease.Releasable), Loggers (org.elasticsearch.common.logging.Loggers), ByteSizeValue (org.elasticsearch.common.unit.ByteSizeValue), OutputStream (java.io.OutputStream), ArrayUtil (org.apache.lucene.util.ArrayUtil), IndexShardState (org.elasticsearch.index.shard.IndexShardState), IndexInput (org.apache.lucene.store.IndexInput), IndexShard (org.elasticsearch.index.shard.IndexShard), IOUtils (org.apache.lucene.util.IOUtils), IOException (java.io.IOException), StopWatch (org.elasticsearch.common.StopWatch), IndexFormatTooOldException (org.apache.lucene.index.IndexFormatTooOldException), ExceptionsHelper (org.elasticsearch.ExceptionsHelper), RemoteTransportException (org.elasticsearch.transport.RemoteTransportException), List (java.util.List), Logger (org.apache.logging.log4j.Logger), InputStreamIndexInput (org.elasticsearch.common.lucene.store.InputStreamIndexInput), Translog (org.elasticsearch.index.translog.Translog), RateLimiter (org.apache.lucene.store.RateLimiter)
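
The BufferedOutputStream appears here in the outputStreamFactories lambda: each outgoing file's RecoveryOutputStream is wrapped in a buffer sized to chunkSizeInBytes, so the transport underneath receives whole chunks rather than whatever write sizes the copy loop happens to produce. A minimal sketch of the two-argument constructor in isolation (ChunkedBufferSketch is hypothetical; a ByteArrayOutputStream stands in for the network stream):

import java.io.BufferedOutputStream;
import java.io.ByteArrayOutputStream;
import java.io.IOException;
import java.io.OutputStream;

final class ChunkedBufferSketch {
    public static void main(String[] args) throws IOException {
        int chunkSizeInBytes = 512 * 1024;
        OutputStream sink = new ByteArrayOutputStream();
        // The second constructor argument sets the internal buffer size;
        // writes smaller than the buffer are accumulated and passed
        // downstream in one call when the buffer fills.
        try (OutputStream out = new BufferedOutputStream(sink, chunkSizeInBytes)) {
            for (int i = 0; i < 1000; i++) {
                out.write(new byte[] { 1, 2, 3, 4 });
            }
        } // close() flushes whatever remains in the buffer
    }
}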

Aggregations

BufferedOutputStream (java.io.BufferedOutputStream): 2410
FileOutputStream (java.io.FileOutputStream): 1651
IOException (java.io.IOException): 1232
File (java.io.File): 1056
OutputStream (java.io.OutputStream): 789
BufferedInputStream (java.io.BufferedInputStream): 527
InputStream (java.io.InputStream): 374
FileInputStream (java.io.FileInputStream): 332
DataOutputStream (java.io.DataOutputStream): 242
ByteArrayOutputStream (java.io.ByteArrayOutputStream): 227
ZipEntry (java.util.zip.ZipEntry): 212
ZipOutputStream (java.util.zip.ZipOutputStream): 209
FileNotFoundException (java.io.FileNotFoundException): 189
ZipFile (java.util.zip.ZipFile): 115
ArrayList (java.util.ArrayList): 107
URL (java.net.URL): 101
PrintStream (java.io.PrintStream): 98
ObjectOutputStream (java.io.ObjectOutputStream): 97
Test (org.junit.Test): 94
ByteArrayInputStream (java.io.ByteArrayInputStream): 89