Use of com.google.common.io.ByteSource in project bitrafael_public by GENERALBYTESCOM.
The class XMRMnemonicUtility, method getDictionary.
private static Dictionary getDictionary() {
    if (dictionary == null) {
        URL url = Resources.getResource("xmr_english_dictionary.txt");
        ByteSource source = Resources.asByteSource(url);
        dictionary = new Dictionary(source);
    }
    return dictionary;
}
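For context, a minimal self-contained sketch of the same Guava pattern: wrapping a classpath resource as a re-openable ByteSource and reading it as text. The resource name words.txt and the class name are placeholders, not part of the original project.

import com.google.common.io.ByteSource;
import com.google.common.io.Resources;

import java.net.URL;
import java.nio.charset.StandardCharsets;
import java.util.List;

public class ResourceByteSourceExample {

    public static void main(String[] args) throws Exception {
        // Locate a resource on the classpath; throws IllegalArgumentException if it is missing.
        URL url = Resources.getResource("words.txt");
        // Wrap the URL as a ByteSource; no bytes are read until a stream is opened.
        ByteSource source = Resources.asByteSource(url);
        // Read it as UTF-8 lines, the way a dictionary file would typically be consumed.
        List<String> lines = source.asCharSource(StandardCharsets.UTF_8).readLines();
        System.out.println("Loaded " + lines.size() + " dictionary entries");
    }
}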
Use of com.google.common.io.ByteSource in project druid by druid-io.
The class ByteBufferWriteOutBytes, method asInputStream.
@Override
public InputStream asInputStream() throws IOException {
    checkOpen();
    Function<ByteBuffer, ByteSource> byteBufferToByteSource = buf -> new ByteSource() {
        @Override
        public InputStream openStream() {
            ByteBuffer inputBuf = buf.duplicate();
            inputBuf.flip();
            return new ByteBufferInputStream(inputBuf);
        }
    };
    return ByteSource.concat(buffers.stream().map(byteBufferToByteSource).collect(Collectors.toList())).openStream();
}
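A minimal sketch, not taken from Druid, of how ByteSource.concat behaves: each wrapped source contributes its bytes in order, concatenation is lazy, and the combined source can be re-opened. The class name and string contents are illustrative.

import com.google.common.io.ByteSource;
import com.google.common.io.ByteStreams;

import java.io.InputStream;
import java.nio.charset.StandardCharsets;
import java.util.Arrays;

public class ConcatByteSourceExample {

    public static void main(String[] args) throws Exception {
        // Two in-memory sources standing in for the duplicated ByteBuffers above.
        ByteSource first = ByteSource.wrap("hello ".getBytes(StandardCharsets.UTF_8));
        ByteSource second = ByteSource.wrap("world".getBytes(StandardCharsets.UTF_8));

        // concat() chains the sources lazily; nothing is copied until a stream is opened.
        ByteSource combined = ByteSource.concat(Arrays.asList(first, second));

        try (InputStream in = combined.openStream()) {
            byte[] all = ByteStreams.toByteArray(in);
            System.out.println(new String(all, StandardCharsets.UTF_8)); // prints "hello world"
        }
    }
}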
Use of com.google.common.io.ByteSource in project druid by druid-io.
The class S3DataSegmentPuller, method getSegmentFiles.
FileUtils.FileCopyResult getSegmentFiles(final CloudObjectLocation s3Coords, final File outDir) throws SegmentLoadingException {
    log.info("Pulling index at path[%s] to outDir[%s]", s3Coords, outDir);
    if (!isObjectInBucket(s3Coords)) {
        throw new SegmentLoadingException("IndexFile[%s] does not exist.", s3Coords);
    }
    try {
        FileUtils.mkdirp(outDir);
        final URI uri = s3Coords.toUri(S3StorageDruidModule.SCHEME);
        final ByteSource byteSource = new ByteSource() {
            @Override
            public InputStream openStream() throws IOException {
                try {
                    return buildFileObject(uri).openInputStream();
                } catch (AmazonServiceException e) {
                    if (e.getCause() != null) {
                        if (S3Utils.S3RETRY.apply(e)) {
                            throw new IOException("Recoverable exception", e);
                        }
                    }
                    throw new RuntimeException(e);
                }
            }
        };
        if (CompressionUtils.isZip(s3Coords.getPath())) {
            final FileUtils.FileCopyResult result = CompressionUtils.unzip(byteSource, outDir, S3Utils.S3RETRY, false);
            log.info("Loaded %d bytes from [%s] to [%s]", result.size(), s3Coords.toString(), outDir.getAbsolutePath());
            return result;
        }
        if (CompressionUtils.isGz(s3Coords.getPath())) {
            final String fname = Files.getNameWithoutExtension(uri.getPath());
            final File outFile = new File(outDir, fname);
            final FileUtils.FileCopyResult result = CompressionUtils.gunzip(byteSource, outFile, S3Utils.S3RETRY);
            log.info("Loaded %d bytes from [%s] to [%s]", result.size(), s3Coords.toString(), outFile.getAbsolutePath());
            return result;
        }
        throw new IAE("Do not know how to load file type at [%s]", uri.toString());
    } catch (Exception e) {
        try {
            FileUtils.deleteDirectory(outDir);
        } catch (IOException ioe) {
            log.warn(ioe, "Failed to remove output directory [%s] for segment pulled from [%s]", outDir.getAbsolutePath(), s3Coords.toString());
        }
        throw new SegmentLoadingException(e, e.getMessage());
    }
}
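The point of wrapping the S3 read in an anonymous ByteSource is that openStream() can be invoked again from scratch after a failure, which is what allows CompressionUtils.unzip and gunzip to retry with S3Utils.S3RETRY. A hedged sketch of that idea using a local file instead of S3; the file name segment.bin and the readWithRetry helper are illustrative, not Druid APIs.

import com.google.common.io.ByteSource;
import com.google.common.io.ByteStreams;

import java.io.File;
import java.io.FileInputStream;
import java.io.IOException;
import java.io.InputStream;

public class ReopenableByteSourceExample {

    public static void main(String[] args) throws IOException {
        // A ByteSource is a factory for streams: every openStream() call starts over
        // from the beginning, which is what makes retrying a failed pull possible.
        File file = new File("segment.bin"); // placeholder path
        ByteSource source = new ByteSource() {
            @Override
            public InputStream openStream() throws IOException {
                return new FileInputStream(file);
            }
        };

        byte[] data = readWithRetry(source, 3);
        System.out.println("Read " + data.length + " bytes");
    }

    // Simplified retry loop standing in for the retry logic inside CompressionUtils.
    static byte[] readWithRetry(ByteSource source, int maxAttempts) throws IOException {
        IOException last = null;
        for (int attempt = 1; attempt <= maxAttempts; attempt++) {
            try (InputStream in = source.openStream()) {
                return ByteStreams.toByteArray(in);
            } catch (IOException e) {
                last = e; // a fresh openStream() is attempted on the next iteration
            }
        }
        throw last;
    }
}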
Use of com.google.common.io.ByteSource in project druid by druid-io.
The class HttpRemoteTaskRunner, method streamTaskReports.
@Override
public Optional<ByteSource> streamTaskReports(String taskId) {
    // Read on tasks is safe
    @SuppressWarnings("GuardedBy") HttpRemoteTaskRunnerWorkItem taskRunnerWorkItem = tasks.get(taskId);
    Worker worker = null;
    if (taskRunnerWorkItem != null && taskRunnerWorkItem.getState() != HttpRemoteTaskRunnerWorkItem.State.COMPLETE) {
        worker = taskRunnerWorkItem.getWorker();
    }
    if (worker == null || !workers.containsKey(worker.getHost())) {
        // Worker is not running this task, it might be available in deep storage
        return Optional.absent();
    } else {
        // Worker is still running this task
        TaskLocation taskLocation = taskRunnerWorkItem.getLocation();
        final URL url = TaskRunnerUtils.makeTaskLocationURL(taskLocation, "/druid/worker/v1/chat/%s/liveReports", taskId);
        return Optional.of(new ByteSource() {
            @Override
            public InputStream openStream() throws IOException {
                try {
                    return httpClient.go(new Request(HttpMethod.GET, url), new InputStreamResponseHandler()).get();
                } catch (InterruptedException e) {
                    throw new RuntimeException(e);
                } catch (ExecutionException e) {
                    // Unwrap if possible
                    Throwables.propagateIfPossible(e.getCause(), IOException.class);
                    throw new RuntimeException(e);
                }
            }
        });
    }
}
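A hedged sketch of how a caller might consume the Optional<ByteSource> returned above; printLiveReports is a hypothetical helper, and the report is simply copied to stdout. An absent Optional means no worker is currently running the task.

import com.google.common.base.Optional;
import com.google.common.io.ByteSource;
import com.google.common.io.ByteStreams;

import java.io.IOException;
import java.io.InputStream;

public class TaskReportConsumerExample {

    // Prints whatever streamTaskReports(taskId) returned.
    static void printLiveReports(String taskId, Optional<ByteSource> reports) throws IOException {
        if (!reports.isPresent()) {
            System.out.println("No live reports for task " + taskId);
            return;
        }
        // openStream() lazily issues the HTTP request to the worker's liveReports endpoint.
        try (InputStream in = reports.get().openStream()) {
            ByteStreams.copy(in, System.out);
        }
    }
}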
Use of com.google.common.io.ByteSource in project druid by druid-io.
The class HttpRemoteTaskRunner, method streamTaskLog.
@Override
public Optional<ByteSource> streamTaskLog(String taskId, long offset) {
    // Read on tasks is safe
    @SuppressWarnings("GuardedBy") HttpRemoteTaskRunnerWorkItem taskRunnerWorkItem = tasks.get(taskId);
    Worker worker = null;
    if (taskRunnerWorkItem != null && taskRunnerWorkItem.getState() != HttpRemoteTaskRunnerWorkItem.State.COMPLETE) {
        worker = taskRunnerWorkItem.getWorker();
    }
    if (worker == null || !workers.containsKey(worker.getHost())) {
        // Worker is not running this task, it might be available in deep storage
        return Optional.absent();
    } else {
        // Worker is still running this task
        final URL url = TaskRunnerUtils.makeWorkerURL(worker, "/druid/worker/v1/task/%s/log?offset=%s", taskId, Long.toString(offset));
        return Optional.of(new ByteSource() {
            @Override
            public InputStream openStream() throws IOException {
                try {
                    return httpClient.go(new Request(HttpMethod.GET, url), new InputStreamResponseHandler()).get();
                } catch (InterruptedException e) {
                    throw new RuntimeException(e);
                } catch (ExecutionException e) {
                    // Unwrap if possible
                    Throwables.propagateIfPossible(e.getCause(), IOException.class);
                    throw new RuntimeException(e);
                }
            }
        });
    }
}
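The log variant builds the worker URL with an offset, so each call returns only the bytes from that position onward. A minimal sketch, assuming only Guava, of turning the returned ByteSource into log text; readLogChunk and the UTF-8 charset choice are illustrative.

import com.google.common.base.Optional;
import com.google.common.io.ByteSource;

import java.io.IOException;
import java.nio.charset.StandardCharsets;

public class TaskLogConsumerExample {

    // Reads whatever the worker returned for streamTaskLog(taskId, offset) as text.
    static String readLogChunk(Optional<ByteSource> logSource) throws IOException {
        if (!logSource.isPresent()) {
            return ""; // task finished or unknown; the log would have to come from deep storage
        }
        // asCharSource().read() opens the HTTP stream, reads it fully, and closes it.
        return logSource.get().asCharSource(StandardCharsets.UTF_8).read();
    }
}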