
Example 41 with ByteSource

Use of com.google.common.io.ByteSource in project bitrafael_public by GENERALBYTESCOM.

The class XMRMnemonicUtility, method getDictionary.

private static Dictionary getDictionary() {
    // Lazily initialize the shared dictionary from the bundled classpath resource.
    if (dictionary == null) {
        URL url = Resources.getResource("xmr_english_dictionary.txt");
        // Wrap the resource as a ByteSource; bytes are read only when the Dictionary consumes them.
        ByteSource source = Resources.asByteSource(url);
        dictionary = new Dictionary(source);
    }
    return dictionary;
}
Also used : ByteSource(com.google.common.io.ByteSource) URL(java.net.URL)
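
For context, a minimal sketch of consuming a Resources-backed ByteSource like the one above, reading the bundled wordlist as UTF-8 lines. The resource name is taken from the example; the WordlistReader class is hypothetical.

import com.google.common.io.ByteSource;
import com.google.common.io.Resources;

import java.io.IOException;
import java.net.URL;
import java.nio.charset.StandardCharsets;
import java.util.List;

// Hypothetical helper: reads the classpath wordlist through a Guava ByteSource.
public final class WordlistReader {

    public static List<String> readWords() throws IOException {
        // Resolve the resource on the classpath; throws IllegalArgumentException if absent.
        URL url = Resources.getResource("xmr_english_dictionary.txt");
        // Wrap it as a lazy ByteSource; no bytes are read until a stream is opened.
        ByteSource source = Resources.asByteSource(url);
        // View the bytes as UTF-8 text and return one word per line.
        return source.asCharSource(StandardCharsets.UTF_8).readLines();
    }

    private WordlistReader() {
    }
}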

Example 42 with ByteSource

Use of com.google.common.io.ByteSource in project druid by druid-io.

The class ByteBufferWriteOutBytes, method asInputStream.

@Override
public InputStream asInputStream() throws IOException {
    checkOpen();
    // Expose each written ByteBuffer as its own ByteSource. duplicate() leaves the original
    // buffer's position untouched, and flip() limits the read view to the bytes written so far.
    Function<ByteBuffer, ByteSource> byteBufferToByteSource = buf -> new ByteSource() {

        @Override
        public InputStream openStream() {
            ByteBuffer inputBuf = buf.duplicate();
            inputBuf.flip();
            return new ByteBufferInputStream(inputBuf);
        }
    };
    // Concatenate the per-buffer sources into a single logical input stream.
    return ByteSource.concat(buffers.stream().map(byteBufferToByteSource).collect(Collectors.toList())).openStream();
}
Also used : IOException(java.io.IOException) Channels(org.apache.druid.io.Channels) Ints(com.google.common.primitives.Ints) Function(java.util.function.Function) Collectors(java.util.stream.Collectors) BufferUnderflowException(java.nio.BufferUnderflowException) ByteBuffer(java.nio.ByteBuffer) ArrayList(java.util.ArrayList) WritableByteChannel(java.nio.channels.WritableByteChannel) Preconditions(com.google.common.base.Preconditions) ByteBufferInputStream(org.apache.druid.io.ByteBufferInputStream) ByteSource(com.google.common.io.ByteSource) IAE(org.apache.druid.java.util.common.IAE) InputStream(java.io.InputStream)
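
A self-contained sketch of the same ByteSource.concat idea using plain byte arrays via ByteSource.wrap, since ByteBufferInputStream is Druid-internal; the class and strings are illustrative only.

import com.google.common.io.ByteSource;
import com.google.common.io.ByteStreams;

import java.io.IOException;
import java.io.InputStream;
import java.io.InputStream;
import java.nio.charset.StandardCharsets;
import java.util.Arrays;
import java.util.List;

// Hypothetical demo: several small ByteSources read back as one logical stream.
public final class ConcatDemo {

    public static void main(String[] args) throws IOException {
        List<ByteSource> parts = Arrays.asList(
                ByteSource.wrap("Hello, ".getBytes(StandardCharsets.UTF_8)),
                ByteSource.wrap("Byte".getBytes(StandardCharsets.UTF_8)),
                ByteSource.wrap("Source!".getBytes(StandardCharsets.UTF_8)));

        // concat() builds a lazy view; the parts are only opened when this stream is read.
        ByteSource combined = ByteSource.concat(parts);

        try (InputStream in = combined.openStream()) {
            System.out.println(new String(ByteStreams.toByteArray(in), StandardCharsets.UTF_8));
            // prints: Hello, ByteSource!
        }
    }
}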

Example 43 with ByteSource

Use of com.google.common.io.ByteSource in project druid by druid-io.

The class S3DataSegmentPuller, method getSegmentFiles.

FileUtils.FileCopyResult getSegmentFiles(final CloudObjectLocation s3Coords, final File outDir) throws SegmentLoadingException {
    log.info("Pulling index at path[%s] to outDir[%s]", s3Coords, outDir);
    if (!isObjectInBucket(s3Coords)) {
        throw new SegmentLoadingException("IndexFile[%s] does not exist.", s3Coords);
    }
    try {
        FileUtils.mkdirp(outDir);
        final URI uri = s3Coords.toUri(S3StorageDruidModule.SCHEME);
        final ByteSource byteSource = new ByteSource() {

            @Override
            public InputStream openStream() throws IOException {
                try {
                    return buildFileObject(uri).openInputStream();
                } catch (AmazonServiceException e) {
                    if (e.getCause() != null) {
                        if (S3Utils.S3RETRY.apply(e)) {
                            throw new IOException("Recoverable exception", e);
                        }
                    }
                    throw new RuntimeException(e);
                }
            }
        };
        if (CompressionUtils.isZip(s3Coords.getPath())) {
            final FileUtils.FileCopyResult result = CompressionUtils.unzip(byteSource, outDir, S3Utils.S3RETRY, false);
            log.info("Loaded %d bytes from [%s] to [%s]", result.size(), s3Coords.toString(), outDir.getAbsolutePath());
            return result;
        }
        if (CompressionUtils.isGz(s3Coords.getPath())) {
            final String fname = Files.getNameWithoutExtension(uri.getPath());
            final File outFile = new File(outDir, fname);
            final FileUtils.FileCopyResult result = CompressionUtils.gunzip(byteSource, outFile, S3Utils.S3RETRY);
            log.info("Loaded %d bytes from [%s] to [%s]", result.size(), s3Coords.toString(), outFile.getAbsolutePath());
            return result;
        }
        throw new IAE("Do not know how to load file type at [%s]", uri.toString());
    } catch (Exception e) {
        try {
            FileUtils.deleteDirectory(outDir);
        } catch (IOException ioe) {
            log.warn(ioe, "Failed to remove output directory [%s] for segment pulled from [%s]", outDir.getAbsolutePath(), s3Coords.toString());
        }
        throw new SegmentLoadingException(e, e.getMessage());
    }
}
Also used : SegmentLoadingException(org.apache.druid.segment.loading.SegmentLoadingException) FileUtils(org.apache.druid.java.util.common.FileUtils) AmazonServiceException(com.amazonaws.AmazonServiceException) ByteSource(com.google.common.io.ByteSource) IOException(java.io.IOException) IAE(org.apache.druid.java.util.common.IAE) URI(java.net.URI) File(java.io.File) AmazonS3Exception(com.amazonaws.services.s3.model.AmazonS3Exception) AmazonClientException(com.amazonaws.AmazonClientException)
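
The pattern worth noting above is that openStream() rethrows failures it considers recoverable as IOException, which is the signal retry-aware consumers (here CompressionUtils.unzip driven by S3Utils.S3RETRY) use to reopen the source and try again. Below is a generic sketch of that contract; fetchObject and isRecoverable are hypothetical placeholders, not real library calls.

import com.google.common.io.ByteSource;

import java.io.IOException;
import java.io.InputStream;
import java.net.URI;

// Hypothetical sketch: a ByteSource over a remote object whose openStream() maps
// transient provider failures to IOException so callers may retry with a fresh stream.
public final class RetryableByteSource extends ByteSource {

    private final URI location;

    public RetryableByteSource(URI location) {
        this.location = location;
    }

    @Override
    public InputStream openStream() throws IOException {
        try {
            // Placeholder for the provider read, e.g. buildFileObject(uri).openInputStream().
            return fetchObject(location);
        } catch (RuntimeException e) {
            if (isRecoverable(e)) {
                // IOException marks the failure as retryable: the caller can simply
                // call openStream() again on this source.
                throw new IOException("Recoverable exception", e);
            }
            throw e;
        }
    }

    // Hypothetical placeholders for provider-specific logic; not real library calls.
    private static InputStream fetchObject(URI uri) {
        throw new UnsupportedOperationException("provider-specific fetch goes here");
    }

    private static boolean isRecoverable(RuntimeException e) {
        return false;
    }
}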

Example 44 with ByteSource

Use of com.google.common.io.ByteSource in project druid by druid-io.

The class HttpRemoteTaskRunner, method streamTaskReports.

@Override
public Optional<ByteSource> streamTaskReports(String taskId) {
    // Read on tasks is safe
    @SuppressWarnings("GuardedBy") HttpRemoteTaskRunnerWorkItem taskRunnerWorkItem = tasks.get(taskId);
    Worker worker = null;
    if (taskRunnerWorkItem != null && taskRunnerWorkItem.getState() != HttpRemoteTaskRunnerWorkItem.State.COMPLETE) {
        worker = taskRunnerWorkItem.getWorker();
    }
    if (worker == null || !workers.containsKey(worker.getHost())) {
        // Worker is not running this task, it might be available in deep storage
        return Optional.absent();
    } else {
        // Worker is still running this task
        TaskLocation taskLocation = taskRunnerWorkItem.getLocation();
        final URL url = TaskRunnerUtils.makeTaskLocationURL(taskLocation, "/druid/worker/v1/chat/%s/liveReports", taskId);
        return Optional.of(new ByteSource() {

            @Override
            public InputStream openStream() throws IOException {
                try {
                    return httpClient.go(new Request(HttpMethod.GET, url), new InputStreamResponseHandler()).get();
                } catch (InterruptedException e) {
                    throw new RuntimeException(e);
                } catch (ExecutionException e) {
                    // Unwrap if possible
                    Throwables.propagateIfPossible(e.getCause(), IOException.class);
                    throw new RuntimeException(e);
                }
            }
        });
    }
}
Also used : InputStream(java.io.InputStream) Request(org.apache.druid.java.util.http.client.Request) IOException(java.io.IOException) TaskLocation(org.apache.druid.indexer.TaskLocation) URL(java.net.URL) InputStreamResponseHandler(org.apache.druid.java.util.http.client.response.InputStreamResponseHandler) Worker(org.apache.druid.indexing.worker.Worker) ByteSource(com.google.common.io.ByteSource) ExecutionException(java.util.concurrent.ExecutionException)
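
A minimal sketch of consuming the Optional<ByteSource> returned by streamTaskReports: if a source is present, its contents can be copied straight to a local sink. The ReportSaver class and file destination are hypothetical.

import com.google.common.base.Optional;
import com.google.common.io.ByteSource;
import com.google.common.io.Files;

import java.io.File;
import java.io.IOException;

// Hypothetical caller: persist a task's live report if a worker still serves it.
public final class ReportSaver {

    public static boolean saveReport(Optional<ByteSource> maybeReport, File dest) throws IOException {
        if (!maybeReport.isPresent()) {
            // Task is complete or unknown to the runner; a real caller would fall back
            // to deep storage (or report "not found") here.
            return false;
        }
        // copyTo() opens the HTTP-backed stream once and writes it to the file sink.
        maybeReport.get().copyTo(Files.asByteSink(dest));
        return true;
    }
}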

Example 45 with ByteSource

Use of com.google.common.io.ByteSource in project druid by druid-io.

The class HttpRemoteTaskRunner, method streamTaskLog.

@Override
public Optional<ByteSource> streamTaskLog(String taskId, long offset) {
    // Read on tasks is safe
    @SuppressWarnings("GuardedBy") HttpRemoteTaskRunnerWorkItem taskRunnerWorkItem = tasks.get(taskId);
    Worker worker = null;
    if (taskRunnerWorkItem != null && taskRunnerWorkItem.getState() != HttpRemoteTaskRunnerWorkItem.State.COMPLETE) {
        worker = taskRunnerWorkItem.getWorker();
    }
    if (worker == null || !workers.containsKey(worker.getHost())) {
        // Worker is not running this task, it might be available in deep storage
        return Optional.absent();
    } else {
        // Worker is still running this task
        final URL url = TaskRunnerUtils.makeWorkerURL(worker, "/druid/worker/v1/task/%s/log?offset=%s", taskId, Long.toString(offset));
        return Optional.of(new ByteSource() {

            @Override
            public InputStream openStream() throws IOException {
                try {
                    return httpClient.go(new Request(HttpMethod.GET, url), new InputStreamResponseHandler()).get();
                } catch (InterruptedException e) {
                    throw new RuntimeException(e);
                } catch (ExecutionException e) {
                    // Unwrap if possible
                    Throwables.propagateIfPossible(e.getCause(), IOException.class);
                    throw new RuntimeException(e);
                }
            }
        });
    }
}
Also used : InputStream(java.io.InputStream) Request(org.apache.druid.java.util.http.client.Request) IOException(java.io.IOException) URL(java.net.URL) InputStreamResponseHandler(org.apache.druid.java.util.http.client.response.InputStreamResponseHandler) Worker(org.apache.druid.indexing.worker.Worker) ByteSource(com.google.common.io.ByteSource) ExecutionException(java.util.concurrent.ExecutionException)
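
In the example above the offset is applied server-side through the query string. When the remote end cannot seek, Guava can apply the offset client-side instead: ByteSource.slice(offset, length) returns a view that skips the leading bytes each time the stream is opened. A hedged sketch follows; the helper class is hypothetical, and note that client-side skipping still transfers the skipped bytes over the wire.

import com.google.common.io.ByteSource;
import com.google.common.io.Resources;

import java.io.IOException;
import java.io.InputStream;
import java.net.URL;

// Hypothetical sketch: client-side offsetting over a URL-backed ByteSource.
public final class OffsetReadSketch {

    public static InputStream openFromOffset(URL logUrl, long offset) throws IOException {
        ByteSource whole = Resources.asByteSource(logUrl);
        // slice() is a lazy view: each openStream() skips `offset` bytes before returning data.
        ByteSource tail = whole.slice(offset, Long.MAX_VALUE);
        return tail.openStream();
    }
}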

Aggregations

ByteSource (com.google.common.io.ByteSource): 138
IOException (java.io.IOException): 58
Test (org.junit.Test): 58
InputStream (java.io.InputStream): 42
ByteArrayInputStream (java.io.ByteArrayInputStream): 33
File (java.io.File): 33
ContentItemImpl (ddf.catalog.content.data.impl.ContentItemImpl): 18
Metacard (ddf.catalog.data.Metacard): 17
ContentItem (ddf.catalog.content.data.ContentItem): 16
StringWriter (java.io.StringWriter): 14
FileInputStream (java.io.FileInputStream): 13
Test (org.junit.jupiter.api.Test): 12
URI (java.net.URI): 11
Path (java.nio.file.Path): 11
ArrayList (java.util.ArrayList): 11
URL (java.net.URL): 10
CreateStorageRequestImpl (ddf.catalog.content.operation.impl.CreateStorageRequestImpl): 9
ByteArrayOutputStream (java.io.ByteArrayOutputStream): 9
TemporaryFileBackedOutputStream (org.codice.ddf.platform.util.TemporaryFileBackedOutputStream): 9
FilterInputStream (java.io.FilterInputStream): 8