
Example 81 with ByteSource

Use of com.google.common.io.ByteSource in project druid by druid-io.

From the class AzureTaskLogsTest, the method testStreamTaskLogWithNegative:

@Test
public void testStreamTaskLogWithNegative() throws Exception {
    final String testLog = "hello this is a log";
    final String blobPath = prefix + "/" + taskid + "/log";
    expect(azureStorage.getBlobExists(container, blobPath)).andReturn(true);
    expect(azureStorage.getBlobLength(container, blobPath)).andReturn((long) testLog.length());
    expect(azureStorage.getBlobInputStream(container, blobPath)).andReturn(new ByteArrayInputStream(testLog.getBytes(Charsets.UTF_8)));
    replayAll();
    final Optional<ByteSource> byteSource = azureTaskLogs.streamTaskLog(taskid, -3);
    final StringWriter writer = new StringWriter();
    IOUtils.copy(byteSource.get().openStream(), writer, "UTF-8");
    // assertEquals takes (expected, actual); a negative offset of -3 should
    // yield only the last three characters of the log.
    Assert.assertEquals(testLog.substring(testLog.length() - 3), writer.toString());
    verifyAll();
}
Also used: StringWriter (java.io.StringWriter), ByteArrayInputStream (java.io.ByteArrayInputStream), ByteSource (com.google.common.io.ByteSource), Test (org.junit.Test)
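
The negative-offset contract the test exercises (read only the final N bytes of the log) maps naturally onto Guava's ByteSource.slice. Below is a minimal, self-contained sketch of that mapping, reusing the test's string; TailSliceDemo and its variable names are illustrative only, not Druid's actual implementation.

import com.google.common.io.ByteSource;
import java.nio.charset.StandardCharsets;

public class TailSliceDemo {
    public static void main(String[] args) throws Exception {
        byte[] log = "hello this is a log".getBytes(StandardCharsets.UTF_8);
        ByteSource full = ByteSource.wrap(log);
        long offset = -3; // negative offset: read only the last 3 bytes
        long start = offset < 0 ? Math.max(log.length + offset, 0) : offset;
        ByteSource tail = full.slice(start, log.length - start);
        // Prints "log", the same substring the test asserts.
        System.out.println(new String(tail.read(), StandardCharsets.UTF_8));
    }
}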

Example 82 with ByteSource

Use of com.google.common.io.ByteSource in project druid by druid-io.

From the class HadoopConverterJobTest, the method setUp:

@Before
public void setUp() throws Exception {
    final MetadataStorageUpdaterJobSpec metadataStorageUpdaterJobSpec = new MetadataStorageUpdaterJobSpec() {

        @Override
        public String getSegmentTable() {
            return derbyConnectorRule.metadataTablesConfigSupplier().get().getSegmentsTable();
        }

        @Override
        public MetadataStorageConnectorConfig get() {
            return derbyConnectorRule.getMetadataConnectorConfig();
        }
    };
    final File scratchFileDir = temporaryFolder.newFolder();
    storageLocProperty = System.getProperty(STORAGE_PROPERTY_KEY);
    tmpSegmentDir = temporaryFolder.newFolder();
    System.setProperty(STORAGE_PROPERTY_KEY, tmpSegmentDir.getAbsolutePath());
    final URL url = Preconditions.checkNotNull(Query.class.getClassLoader().getResource("druid.sample.tsv"));
    final File tmpInputFile = temporaryFolder.newFile();
    FileUtils.retryCopy(new ByteSource() {

        @Override
        public InputStream openStream() throws IOException {
            return url.openStream();
        }
    }, tmpInputFile, FileUtils.IS_EXCEPTION, 3);
    final HadoopDruidIndexerConfig hadoopDruidIndexerConfig = new HadoopDruidIndexerConfig(
        new HadoopIngestionSpec(
            new DataSchema(
                DATASOURCE,
                HadoopDruidIndexerConfig.JSON_MAPPER.convertValue(
                    new StringInputRowParser(
                        new DelimitedParseSpec(
                            new TimestampSpec("ts", "iso", null),
                            new DimensionsSpec(DimensionsSpec.getDefaultSchemas(Arrays.asList(TestIndex.DIMENSIONS)), null, null),
                            "\t",
                            "",
                            Arrays.asList(TestIndex.COLUMNS)
                        ),
                        null
                    ),
                    Map.class
                ),
                new AggregatorFactory[] {
                    new DoubleSumAggregatorFactory(TestIndex.METRICS[0], TestIndex.METRICS[0]),
                    new HyperUniquesAggregatorFactory("quality_uniques", "quality")
                },
                new UniformGranularitySpec(Granularities.MONTH, Granularities.DAY, ImmutableList.<Interval>of(interval)),
                HadoopDruidIndexerConfig.JSON_MAPPER
            ),
            new HadoopIOConfig(
                ImmutableMap.<String, Object>of("type", "static", "paths", tmpInputFile.getAbsolutePath()),
                metadataStorageUpdaterJobSpec,
                tmpSegmentDir.getAbsolutePath()
            ),
            new HadoopTuningConfig(scratchFileDir.getAbsolutePath(), null, null, null, null, null, false, false, false, false, null, false, false, null, null, null, false, false)
        )
    );
    metadataStorageTablesConfigSupplier = derbyConnectorRule.metadataTablesConfigSupplier();
    connector = derbyConnectorRule.getConnector();
    try {
        connector.getDBI().withHandle(new HandleCallback<Void>() {

            @Override
            public Void withHandle(Handle handle) throws Exception {
                handle.execute("DROP TABLE druid_segments");
                return null;
            }
        });
    } catch (CallbackFailedException e) {
        // The segments table may not exist yet; the DROP is best-effort cleanup.
    }
    List<Jobby> jobs = ImmutableList.of(new Jobby() {

        @Override
        public boolean run() {
            connector.createSegmentTable(metadataStorageUpdaterJobSpec.getSegmentTable());
            return true;
        }
    }, new HadoopDruidDetermineConfigurationJob(hadoopDruidIndexerConfig), new HadoopDruidIndexerJob(hadoopDruidIndexerConfig, new SQLMetadataStorageUpdaterJobHandler(connector)));
    JobHelper.runJobs(jobs, hadoopDruidIndexerConfig);
}
Also used: HadoopIngestionSpec (io.druid.indexer.HadoopIngestionSpec), HadoopTuningConfig (io.druid.indexer.HadoopTuningConfig), URL (java.net.URL), HadoopIOConfig (io.druid.indexer.HadoopIOConfig), UniformGranularitySpec (io.druid.segment.indexing.granularity.UniformGranularitySpec), TimestampSpec (io.druid.data.input.impl.TimestampSpec), SQLMetadataStorageUpdaterJobHandler (io.druid.indexer.SQLMetadataStorageUpdaterJobHandler), DoubleSumAggregatorFactory (io.druid.query.aggregation.DoubleSumAggregatorFactory), InputStream (java.io.InputStream), DelimitedParseSpec (io.druid.data.input.impl.DelimitedParseSpec), IOException (java.io.IOException), HadoopDruidIndexerConfig (io.druid.indexer.HadoopDruidIndexerConfig), CallbackFailedException (org.skife.jdbi.v2.exceptions.CallbackFailedException), Handle (org.skife.jdbi.v2.Handle), DataSchema (io.druid.segment.indexing.DataSchema), Jobby (io.druid.indexer.Jobby), HadoopDruidIndexerJob (io.druid.indexer.HadoopDruidIndexerJob), StringInputRowParser (io.druid.data.input.impl.StringInputRowParser), HyperUniquesAggregatorFactory (io.druid.query.aggregation.hyperloglog.HyperUniquesAggregatorFactory), ByteSource (com.google.common.io.ByteSource), DimensionsSpec (io.druid.data.input.impl.DimensionsSpec), File (java.io.File), Map (java.util.Map), ImmutableMap (com.google.common.collect.ImmutableMap), HadoopDruidDetermineConfigurationJob (io.druid.indexer.HadoopDruidDetermineConfigurationJob), Interval (org.joda.time.Interval), Before (org.junit.Before)
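
The anonymous ByteSource in this test exists only to adapt a URL for Druid's retrying copy helper. For a plain one-shot copy, Guava already ships the same adapter as Resources.asByteSource. A minimal sketch, assuming druid.sample.tsv is on the classpath; CopyResourceDemo is illustrative, and unlike FileUtils.retryCopy it performs no retries.

import com.google.common.io.Files;
import com.google.common.io.Resources;
import java.io.File;
import java.net.URL;

public class CopyResourceDemo {
    public static void main(String[] args) throws Exception {
        // Resources.asByteSource builds the same URL-backed ByteSource
        // the test constructs by hand with an anonymous class.
        URL url = CopyResourceDemo.class.getClassLoader().getResource("druid.sample.tsv");
        File target = File.createTempFile("druid-sample", ".tsv");
        Resources.asByteSource(url).copyTo(Files.asByteSink(target));
        System.out.println("copied " + target.length() + " bytes");
    }
}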

Example 83 with ByteSource

Use of com.google.common.io.ByteSource in project keywhiz by square.

From the class FileAssetServlet, the method doGet:

@Override
protected void doGet(HttpServletRequest req, HttpServletResponse resp) throws ServletException, IOException {
    try {
        ByteSource asset = loadAsset(req.getRequestURI());
        if (asset == null) {
            resp.sendError(HttpServletResponse.SC_NOT_FOUND);
            return;
        }
        final String mimeTypeOfExtension = req.getServletContext().getMimeType(req.getRequestURI());
        MediaType mediaType = DEFAULT_MEDIA_TYPE;
        if (mimeTypeOfExtension != null) {
            try {
                mediaType = MediaType.parse(mimeTypeOfExtension);
                if (mediaType.is(MediaType.ANY_TEXT_TYPE)) {
                    mediaType = mediaType.withCharset(DEFAULT_CHARSET);
                }
            } catch (IllegalArgumentException ignore) {
                // Unparseable MIME type from the servlet context; keep the default.
            }
        }
        resp.setContentType(mediaType.type() + "/" + mediaType.subtype());
        if (mediaType.charset().isPresent()) {
            resp.setCharacterEncoding(mediaType.charset().get().toString());
        }
        try (OutputStream output = resp.getOutputStream()) {
            asset.copyTo(output);
        }
    } catch (RuntimeException ignored) {
        resp.sendError(HttpServletResponse.SC_NOT_FOUND);
    }
}
Also used: OutputStream (java.io.OutputStream), ByteSource (com.google.common.io.ByteSource), MediaType (com.google.common.net.MediaType)
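
loadAsset itself is not shown in this excerpt. A plausible shape for it, assuming the servlet maps the request path onto a file tree via Files.asByteSource; the class, field, and path handling below are illustrative only (the real servlet also strips its URI prefix and falls back to an index file for directory requests).

import com.google.common.io.ByteSource;
import com.google.common.io.Files;
import java.io.File;

public class AssetLookupSketch {
    private final File resourceRoot;

    AssetLookupSketch(File resourceRoot) {
        this.resourceRoot = resourceRoot;
    }

    // Resolve a request path to a lazily-read ByteSource, or null when the
    // file is absent; Files.asByteSource defers all I/O until read/copyTo.
    ByteSource loadAsset(String path) {
        File file = new File(resourceRoot, path);
        return file.isFile() ? Files.asByteSource(file) : null;
    }

    public static void main(String[] args) throws Exception {
        ByteSource asset = new AssetLookupSketch(new File(".")).loadAsset("asset.txt");
        System.out.println(asset == null ? "missing" : asset.size() + " bytes");
    }
}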

Example 84 with ByteSource

Use of com.google.common.io.ByteSource in project keywhiz by square.

From the class FileAssetServletTest, the method loadsIndex:

@Test
public void loadsIndex() throws Exception {
    File folder = tempDir.newFolder("loadsIndexTest");
    File indexFile = tempDir.newFile("loadsIndexTest/index.html");
    Files.write("loadsIndexContent", indexFile, UTF_8);
    FileAssetServlet servlet = new FileAssetServlet(folder, "/ui/", "index.html");
    ByteSource byteSource = servlet.loadAsset("/ui/");
    assertThat(byteSource.read()).isEqualTo(Files.toByteArray(indexFile));
}
Also used: ByteSource (com.google.common.io.ByteSource), File (java.io.File), Test (org.junit.Test)

Example 85 with ByteSource

Use of com.google.common.io.ByteSource in project keywhiz by square.

From the class FileAssetServletTest, the method loadsAsset:

@Test
public void loadsAsset() throws Exception {
    File folder = tempDir.newFolder("loadsAssetTest");
    File assetFile = tempDir.newFile("loadsAssetTest/asset.txt");
    Files.write("loadsAssetContent", assetFile, UTF_8);
    FileAssetServlet servlet = new FileAssetServlet(folder, "/ui/", "index.html");
    ByteSource byteSource = servlet.loadAsset("/ui/asset.txt");
    assertThat(byteSource.read()).isEqualTo(Files.toByteArray(assetFile));
}
Also used: ByteSource (com.google.common.io.ByteSource), File (java.io.File), Test (org.junit.Test)
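
Both keywhiz tests compare byte arrays materialized with read() and Files.toByteArray, which loads each file fully into memory. For larger assets, ByteSource.contentEquals performs the same comparison while streaming. A minimal sketch; the file name and the loadAsset stand-in are illustrative.

import com.google.common.io.ByteSource;
import com.google.common.io.Files;
import java.io.File;

public class ContentEqualsDemo {
    public static void main(String[] args) throws Exception {
        File assetFile = new File("asset.txt"); // illustrative path
        ByteSource loaded = Files.asByteSource(assetFile); // stand-in for servlet.loadAsset(...)
        // contentEquals streams both sources and compares chunk by chunk,
        // avoiding two full in-memory byte arrays.
        System.out.println(loaded.contentEquals(Files.asByteSource(assetFile)));
    }
}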

Aggregations

Classes most often used alongside ByteSource across the indexed examples, with occurrence counts:

ByteSource (com.google.common.io.ByteSource): 139
IOException (java.io.IOException): 59
Test (org.junit.Test): 58
InputStream (java.io.InputStream): 42
ByteArrayInputStream (java.io.ByteArrayInputStream): 33
File (java.io.File): 33
ContentItemImpl (ddf.catalog.content.data.impl.ContentItemImpl): 18
Metacard (ddf.catalog.data.Metacard): 17
ContentItem (ddf.catalog.content.data.ContentItem): 16
StringWriter (java.io.StringWriter): 14
FileInputStream (java.io.FileInputStream): 13
Test (org.junit.jupiter.api.Test): 12
URI (java.net.URI): 11
URL (java.net.URL): 11
Path (java.nio.file.Path): 11
ArrayList (java.util.ArrayList): 11
CreateStorageRequestImpl (ddf.catalog.content.operation.impl.CreateStorageRequestImpl): 9
ByteArrayOutputStream (java.io.ByteArrayOutputStream): 9
TemporaryFileBackedOutputStream (org.codice.ddf.platform.util.TemporaryFileBackedOutputStream): 9
FilterInputStream (java.io.FilterInputStream): 8