
Example 1 with MetadataStorageTablesConfig

Use of io.druid.metadata.MetadataStorageTablesConfig in project druid by druid-io.

From class MetadataStorageTablesConfigTest, method testSerdeMetadataStorageTablesConfig:

@Test
public void testSerdeMetadataStorageTablesConfig() throws Exception {
    Injector injector = Guice.createInjector(new Module() {

        @Override
        public void configure(Binder binder) {
            binder.install(new PropertiesModule(Arrays.asList("test.runtime.properties")));
            binder.install(new ConfigModule());
            binder.install(new DruidGuiceExtensions());
            JsonConfigProvider.bind(binder, "druid.metadata.storage.tables", MetadataStorageTablesConfig.class);
        }

        @Provides
        @LazySingleton
        public ObjectMapper jsonMapper() {
            return new DefaultObjectMapper();
        }
    });
    Properties props = injector.getInstance(Properties.class);
    MetadataStorageTablesConfig config = injector.getInstance(MetadataStorageTablesConfig.class);
    Assert.assertEquals(props.getProperty("druid.metadata.storage.tables.base"), config.getBase());
    Assert.assertEquals(props.getProperty("druid.metadata.storage.tables.segments"), config.getSegmentsTable());
    Assert.assertEquals(props.getProperty("druid.metadata.storage.tables.rules"), config.getRulesTable());
    Assert.assertEquals(props.getProperty("druid.metadata.storage.tables.config"), config.getConfigTable());
    Assert.assertEquals(props.getProperty("druid.metadata.storage.tables.tasks"), config.getEntryTable(MetadataStorageTablesConfig.TASK_ENTRY_TYPE));
    Assert.assertEquals(props.getProperty("druid.metadata.storage.tables.taskLog"), config.getLogTable(MetadataStorageTablesConfig.TASK_ENTRY_TYPE));
    Assert.assertEquals(props.getProperty("druid.metadata.storage.tables.taskLock"), config.getLockTable(MetadataStorageTablesConfig.TASK_ENTRY_TYPE));
    Assert.assertEquals(props.getProperty("druid.metadata.storage.tables.dataSource"), config.getDataSourceTable());
    Assert.assertEquals(props.getProperty("druid.metadata.storage.tables.supervisors"), config.getSupervisorTable());
}
Also used : Provides(com.google.inject.Provides) Properties(java.util.Properties) Binder(com.google.inject.Binder) MetadataStorageTablesConfig(io.druid.metadata.MetadataStorageTablesConfig) Injector(com.google.inject.Injector) DefaultObjectMapper(io.druid.jackson.DefaultObjectMapper) Module(com.google.inject.Module) DefaultObjectMapper(io.druid.jackson.DefaultObjectMapper) ObjectMapper(com.fasterxml.jackson.databind.ObjectMapper) Test(org.junit.Test)
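
The binding above reads each table name from the runtime properties under the druid.metadata.storage.tables prefix. For reference, a hypothetical test.runtime.properties satisfying these assertions could look like the snippet below; the keys are taken from the test, but the values are illustrative and not the actual fixture file.

druid.metadata.storage.tables.base=druid
druid.metadata.storage.tables.segments=druid_segments
druid.metadata.storage.tables.rules=druid_rules
druid.metadata.storage.tables.config=druid_config
druid.metadata.storage.tables.tasks=druid_tasks
druid.metadata.storage.tables.taskLog=druid_tasklogs
druid.metadata.storage.tables.taskLock=druid_tasklocks
druid.metadata.storage.tables.dataSource=druid_dataSource
druid.metadata.storage.tables.supervisors=druid_supervisors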

Example 2 with MetadataStorageTablesConfig

Use of io.druid.metadata.MetadataStorageTablesConfig in project druid by druid-io.

From class ResetCluster, method resetMetadataStore:

private void resetMetadataStore(Injector injector) {
    log.info("===========================================================================");
    log.info("Deleting all Records from Metadata Storage.");
    log.info("===========================================================================");
    MetadataStorageConnector connector = injector.getInstance(MetadataStorageConnector.class);
    MetadataStorageTablesConfig tablesConfig = injector.getInstance(MetadataStorageTablesConfig.class);
    String[] tables = new String[] { tablesConfig.getDataSourceTable(), tablesConfig.getPendingSegmentsTable(), tablesConfig.getSegmentsTable(), tablesConfig.getRulesTable(), tablesConfig.getConfigTable(), tablesConfig.getTasksTable(), tablesConfig.getTaskLockTable(), tablesConfig.getTaskLogTable(), tablesConfig.getAuditTable(), tablesConfig.getSupervisorTable() };
    for (String table : tables) {
        connector.deleteAllRecords(table);
    }
}
Also used : MetadataStorageTablesConfig(io.druid.metadata.MetadataStorageTablesConfig) MetadataStorageConnector(io.druid.metadata.MetadataStorageConnector)
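
Outside the Guice wiring used by the ResetCluster command, the same two dependencies can be obtained programmatically. Below is a minimal sketch, not the actual CLI code: it assumes a MetadataStorageConnector supplied by whichever metadata-storage extension is configured (Derby, MySQL, PostgreSQL), and the method name, class name, and "druid" base prefix are illustrative.

import io.druid.metadata.MetadataStorageConnector;
import io.druid.metadata.MetadataStorageTablesConfig;

public class MetadataResetSketch {

    // Wipes a single table. fromBase("druid") derives the default table names
    // (druid_segments, druid_rules, ...) from the given base prefix.
    static void resetSegmentsTable(MetadataStorageConnector connector) {
        MetadataStorageTablesConfig tablesConfig = MetadataStorageTablesConfig.fromBase("druid");
        connector.deleteAllRecords(tablesConfig.getSegmentsTable());
    }
}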

Example 3 with MetadataStorageTablesConfig

Use of io.druid.metadata.MetadataStorageTablesConfig in project hive by apache.

From class DruidStorageHandlerUtils, method publishSegmentsAndCommit:

/**
 * First computes the segment timeline to accommodate the new segments (for the insert-into case),
 * then moves the segments to Druid deep storage with updated metadata/version.
 * All of this is done in a single transaction.
 *
 * @param connector DBI connector to commit
 * @param metadataStorageTablesConfig Druid metadata tables definitions
 * @param dataSource Druid datasource name
 * @param segments List of segments to move and commit to metadata
 * @param overwrite if it is an insert overwrite
 * @param conf Configuration
 * @param dataSegmentPusher segment pusher
 *
 * @return List of successfully published Druid segments.
 * This list has the updated versions and metadata about segments after move and timeline sorting
 *
 * @throws CallbackFailedException if the JDBI transaction callback fails
 */
public static List<DataSegment> publishSegmentsAndCommit(final SQLMetadataConnector connector, final MetadataStorageTablesConfig metadataStorageTablesConfig, final String dataSource, final List<DataSegment> segments, boolean overwrite, Configuration conf, DataSegmentPusher dataSegmentPusher) throws CallbackFailedException {
    return connector.getDBI().inTransaction((handle, transactionStatus) -> {
        // We create the timeline for the existing and new segments
        VersionedIntervalTimeline<String, DataSegment> timeline;
        if (overwrite) {
            // If we are overwriting, we disable existing sources
            disableDataSourceWithHandle(handle, metadataStorageTablesConfig, dataSource);
            // When overwriting, we just start with empty timeline,
            // as we are overwriting segments with new versions
            timeline = new VersionedIntervalTimeline<>(Ordering.natural());
        } else {
            // Append Mode
            if (segments.isEmpty()) {
                // If there are no new segments, we can just bail out
                return Collections.EMPTY_LIST;
            }
            // Otherwise, build a timeline of existing segments in metadata storage
            Interval indexedInterval = JodaUtils.umbrellaInterval(Iterables.transform(segments, input -> input.getInterval()));
            LOG.info("Building timeline for umbrella Interval [{}]", indexedInterval);
            timeline = getTimelineForIntervalWithHandle(handle, dataSource, indexedInterval, metadataStorageTablesConfig);
        }
        final List<DataSegment> finalSegmentsToPublish = Lists.newArrayList();
        for (DataSegment segment : segments) {
            List<TimelineObjectHolder<String, DataSegment>> existingChunks = timeline.lookup(segment.getInterval());
            if (existingChunks.size() > 1) {
                // Druid shard specs do not support multiple partitions for the same interval with different granularity.
                throw new IllegalStateException(String.format("Cannot allocate new segment for dataSource[%s], interval[%s], already have [%,d] chunks. Not possible to append new segment.", dataSource, segment.getInterval(), existingChunks.size()));
            }
            // Find out the segment with latest version and maximum partition number
            SegmentIdentifier max = null;
            final ShardSpec newShardSpec;
            final String newVersion;
            if (!existingChunks.isEmpty()) {
                // Some chunks already exist; find the one with the highest partition number
                TimelineObjectHolder<String, DataSegment> existingHolder = Iterables.getOnlyElement(existingChunks);
                for (PartitionChunk<DataSegment> existing : existingHolder.getObject()) {
                    if (max == null || max.getShardSpec().getPartitionNum() < existing.getObject().getShardSpec().getPartitionNum()) {
                        max = SegmentIdentifier.fromDataSegment(existing.getObject());
                    }
                }
            }
            if (max == null) {
                // No existing shard present in the database, use the current version.
                newShardSpec = segment.getShardSpec();
                newVersion = segment.getVersion();
            } else {
                // use version of existing max segment to generate new shard spec
                newShardSpec = getNextPartitionShardSpec(max.getShardSpec());
                newVersion = max.getVersion();
            }
            DataSegment publishedSegment = publishSegmentWithShardSpec(segment, newShardSpec, newVersion, getPath(segment).getFileSystem(conf), dataSegmentPusher);
            finalSegmentsToPublish.add(publishedSegment);
            timeline.add(publishedSegment.getInterval(), publishedSegment.getVersion(), publishedSegment.getShardSpec().createChunk(publishedSegment));
        }
        // Publish new segments to metadata storage
        final PreparedBatch batch = handle.prepareBatch(String.format("INSERT INTO %1$s (id, dataSource, created_date, start, \"end\", partitioned, version, used, payload) " + "VALUES (:id, :dataSource, :created_date, :start, :end, :partitioned, :version, :used, :payload)", metadataStorageTablesConfig.getSegmentsTable()));
        for (final DataSegment segment : finalSegmentsToPublish) {
            batch.add(new ImmutableMap.Builder<String, Object>().put("id", segment.getIdentifier()).put("dataSource", segment.getDataSource()).put("created_date", new DateTime().toString()).put("start", segment.getInterval().getStart().toString()).put("end", segment.getInterval().getEnd().toString()).put("partitioned", (segment.getShardSpec() instanceof NoneShardSpec) ? false : true).put("version", segment.getVersion()).put("used", true).put("payload", JSON_MAPPER.writeValueAsBytes(segment)).build());
            LOG.info("Published {}", segment.getIdentifier());
        }
        batch.execute();
        return finalSegmentsToPublish;
    });
}
Also used : SQLMetadataConnector(io.druid.metadata.SQLMetadataConnector) FoldController(org.skife.jdbi.v2.FoldController) Request(com.metamx.http.client.Request) FileSystem(org.apache.hadoop.fs.FileSystem) URL(java.net.URL) HttpMethod(org.jboss.netty.handler.codec.http.HttpMethod) LoggerFactory(org.slf4j.LoggerFactory) RetryPolicies(org.apache.hadoop.io.retry.RetryPolicies) FileStatus(org.apache.hadoop.fs.FileStatus) StatementContext(org.skife.jdbi.v2.StatementContext) InetAddress(java.net.InetAddress) SelectQueryConfig(io.druid.query.select.SelectQueryConfig) InputStreamResponseHandler(com.metamx.http.client.response.InputStreamResponseHandler) IndexIO(io.druid.segment.IndexIO) CharStreams(com.google.common.io.CharStreams) DefaultObjectMapper(io.druid.jackson.DefaultObjectMapper) Configuration(org.apache.hadoop.conf.Configuration) Map(java.util.Map) NamedType(com.fasterxml.jackson.databind.jsontype.NamedType) Path(org.apache.hadoop.fs.Path) PreparedBatch(org.skife.jdbi.v2.PreparedBatch) DataSegmentPusher(io.druid.segment.loading.DataSegmentPusher) TimestampFloorExprMacro(io.druid.query.expression.TimestampFloorExprMacro) VersionedIntervalTimeline(io.druid.timeline.VersionedIntervalTimeline) ByteArrayMapper(org.skife.jdbi.v2.util.ByteArrayMapper) DataSegment(io.druid.timeline.DataSegment) ImmutableMap(com.google.common.collect.ImmutableMap) TimeZone(java.util.TimeZone) MapUtils(com.metamx.common.MapUtils) Collection(java.util.Collection) Set(java.util.Set) Interner(com.google.common.collect.Interner) Reader(java.io.Reader) MetadataStorageTablesConfig(io.druid.metadata.MetadataStorageTablesConfig) FileNotFoundException(java.io.FileNotFoundException) TimestampParseExprMacro(io.druid.query.expression.TimestampParseExprMacro) List(java.util.List) PartitionChunk(io.druid.timeline.partition.PartitionChunk) ISOChronology(org.joda.time.chrono.ISOChronology) NoneShardSpec(io.druid.timeline.partition.NoneShardSpec) TrimExprMacro(io.druid.query.expression.TrimExprMacro) HttpClient(com.metamx.http.client.HttpClient) Iterables(com.google.common.collect.Iterables) InjectableValues(com.fasterxml.jackson.databind.InjectableValues) TimestampFormatExprMacro(io.druid.query.expression.TimestampFormatExprMacro) SegmentIdentifier(io.druid.segment.realtime.appenderator.SegmentIdentifier) TimestampExtractExprMacro(io.druid.query.expression.TimestampExtractExprMacro) HdfsDataSegmentPusher(io.druid.storage.hdfs.HdfsDataSegmentPusher) TimelineObjectHolder(io.druid.timeline.TimelineObjectHolder) RegexpExtractExprMacro(io.druid.query.expression.RegexpExtractExprMacro) LikeExprMacro(io.druid.query.expression.LikeExprMacro) TimestampCeilExprMacro(io.druid.query.expression.TimestampCeilExprMacro) ShardSpec(io.druid.timeline.partition.ShardSpec) ArrayList(java.util.ArrayList) Utilities(org.apache.hadoop.hive.ql.exec.Utilities) HashSet(java.util.HashSet) IndexMergerV9(io.druid.segment.IndexMergerV9) Interval(org.joda.time.Interval) SQLException(java.sql.SQLException) Lists(com.google.common.collect.Lists) JodaUtils(com.metamx.common.JodaUtils) ImmutableList(com.google.common.collect.ImmutableList) StringUtils(org.apache.hadoop.util.StringUtils) ResultIterator(org.skife.jdbi.v2.ResultIterator) TimestampShiftExprMacro(io.druid.query.expression.TimestampShiftExprMacro) OutputStream(java.io.OutputStream) HttpHeaders(org.jboss.netty.handler.codec.http.HttpHeaders) NumberedShardSpec(io.druid.timeline.partition.NumberedShardSpec) Logger(org.slf4j.Logger) Folder3(org.skife.jdbi.v2.Folder3) 
HandleCallback(org.skife.jdbi.v2.tweak.HandleCallback) EmittingLogger(com.metamx.emitter.EmittingLogger) ObjectMapper(com.fasterxml.jackson.databind.ObjectMapper) DateTime(org.joda.time.DateTime) Throwables(com.google.common.base.Throwables) Interners(com.google.common.collect.Interners) Query(org.skife.jdbi.v2.Query) IOException(java.io.IOException) InputStreamReader(java.io.InputStreamReader) UnknownHostException(java.net.UnknownHostException) SmileFactory(com.fasterxml.jackson.dataformat.smile.SmileFactory) LinearShardSpec(io.druid.timeline.partition.LinearShardSpec) ExecutionException(java.util.concurrent.ExecutionException) TimeUnit(java.util.concurrent.TimeUnit) HdfsDataSegmentPusherConfig(io.druid.storage.hdfs.HdfsDataSegmentPusherConfig) Handle(org.skife.jdbi.v2.Handle) Ordering(com.google.common.collect.Ordering) ExprMacroTable(io.druid.math.expr.ExprMacroTable) CallbackFailedException(org.skife.jdbi.v2.exceptions.CallbackFailedException) HiveDruidSerializationModule(org.apache.hadoop.hive.druid.serde.HiveDruidSerializationModule) RetryProxy(org.apache.hadoop.io.retry.RetryProxy) NoopEmitter(com.metamx.emitter.core.NoopEmitter) ServiceEmitter(com.metamx.emitter.service.ServiceEmitter) Collections(java.util.Collections) MySQLConnector(io.druid.metadata.storage.mysql.MySQLConnector) InputStream(java.io.InputStream) SegmentIdentifier(io.druid.segment.realtime.appenderator.SegmentIdentifier) NoneShardSpec(io.druid.timeline.partition.NoneShardSpec) DataSegment(io.druid.timeline.DataSegment) NoneShardSpec(io.druid.timeline.partition.NoneShardSpec) ShardSpec(io.druid.timeline.partition.ShardSpec) NumberedShardSpec(io.druid.timeline.partition.NumberedShardSpec) LinearShardSpec(io.druid.timeline.partition.LinearShardSpec) ImmutableMap(com.google.common.collect.ImmutableMap) DateTime(org.joda.time.DateTime) TimelineObjectHolder(io.druid.timeline.TimelineObjectHolder) PreparedBatch(org.skife.jdbi.v2.PreparedBatch) Interval(org.joda.time.Interval)
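
Examples 4 and 5 below call publishSegmentsAndCommit with overwrite set to true to seed the metadata store. For completeness, here is a minimal append-mode sketch (overwrite = false) against the same signature; the wrapper method and its parameter names are illustrative and not part of DruidStorageHandlerUtils.

import java.util.List;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hive.druid.DruidStorageHandlerUtils;
import org.skife.jdbi.v2.exceptions.CallbackFailedException;
import io.druid.metadata.MetadataStorageTablesConfig;
import io.druid.metadata.SQLMetadataConnector;
import io.druid.segment.loading.DataSegmentPusher;
import io.druid.timeline.DataSegment;

public class AppendModeSketch {

    // overwrite = false keeps the existing timeline and assigns the next partition
    // number within each interval to the new segments, as implemented above.
    static List<DataSegment> appendSegments(SQLMetadataConnector connector,
            MetadataStorageTablesConfig tablesConfig, String dataSource,
            List<DataSegment> newSegments, Configuration conf,
            DataSegmentPusher pusher) throws CallbackFailedException {
        return DruidStorageHandlerUtils.publishSegmentsAndCommit(
                connector, tablesConfig, dataSource, newSegments, false, conf, pusher);
    }
}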

Example 4 with MetadataStorageTablesConfig

Use of io.druid.metadata.MetadataStorageTablesConfig in project hive by apache.

From class TestDruidStorageHandler, method testInsertIntoAppendOneMorePartition:

@Test
public void testInsertIntoAppendOneMorePartition() throws MetaException, IOException {
    DerbyConnectorTestUtility connector = derbyConnectorRule.getConnector();
    MetadataStorageTablesConfig metadataStorageTablesConfig = derbyConnectorRule.metadataTablesConfigSupplier().get();
    druidStorageHandler.preCreateTable(tableMock);
    LocalFileSystem localFileSystem = FileSystem.getLocal(config);
    Path taskDirPath = new Path(tableWorkingPath, druidStorageHandler.makeStagingName());
    HdfsDataSegmentPusherConfig pusherConfig = new HdfsDataSegmentPusherConfig();
    pusherConfig.setStorageDirectory(config.get(String.valueOf(HiveConf.ConfVars.DRUID_SEGMENT_DIRECTORY)));
    DataSegmentPusher dataSegmentPusher = new HdfsDataSegmentPusher(pusherConfig, config, DruidStorageHandlerUtils.JSON_MAPPER);
    List<DataSegment> existingSegments = Arrays.asList(createSegment(new Path(taskDirPath, DruidStorageHandlerUtils.INDEX_ZIP).toString(), new Interval(100, 150, DateTimeZone.UTC), "v0", new LinearShardSpec(0)));
    DruidStorageHandlerUtils.publishSegmentsAndCommit(connector, metadataStorageTablesConfig, DATA_SOURCE_NAME, existingSegments, true, config, dataSegmentPusher);
    DataSegment dataSegment = createSegment(new Path(taskDirPath, DruidStorageHandlerUtils.INDEX_ZIP).toString(), new Interval(100, 150, DateTimeZone.UTC), "v0", new LinearShardSpec(0));
    Path descriptorPath = DruidStorageHandlerUtils.makeSegmentDescriptorOutputPath(dataSegment, new Path(taskDirPath, DruidStorageHandler.SEGMENTS_DESCRIPTOR_DIR_NAME));
    DruidStorageHandlerUtils.writeSegmentDescriptor(localFileSystem, dataSegment, descriptorPath);
    druidStorageHandler.commitInsertTable(tableMock, false);
    Assert.assertArrayEquals(Lists.newArrayList(DATA_SOURCE_NAME).toArray(), Lists.newArrayList(DruidStorageHandlerUtils.getAllDataSourceNames(connector, metadataStorageTablesConfig)).toArray());
    final List<DataSegment> dataSegmentList = getUsedSegmentsList(connector, metadataStorageTablesConfig);
    Assert.assertEquals(2, dataSegmentList.size());
    DataSegment persistedSegment = dataSegmentList.get(1);
    Assert.assertEquals("v0", persistedSegment.getVersion());
    Assert.assertTrue(persistedSegment.getShardSpec() instanceof LinearShardSpec);
    Assert.assertEquals(1, persistedSegment.getShardSpec().getPartitionNum());
    Path expectedFinalHadoopPath = new Path(dataSegmentPusher.getPathForHadoop(), dataSegmentPusher.makeIndexPathName(persistedSegment, DruidStorageHandlerUtils.INDEX_ZIP));
    Assert.assertEquals(ImmutableMap.of("type", "hdfs", "path", expectedFinalHadoopPath.toString()), persistedSegment.getLoadSpec());
    Assert.assertEquals("dummySegmentData", FileUtils.readFileToString(new File(expectedFinalHadoopPath.toUri())));
}
Also used : Path(org.apache.hadoop.fs.Path) MetadataStorageTablesConfig(io.druid.metadata.MetadataStorageTablesConfig) HdfsDataSegmentPusher(io.druid.storage.hdfs.HdfsDataSegmentPusher) DataSegmentPusher(io.druid.segment.loading.DataSegmentPusher) LocalFileSystem(org.apache.hadoop.fs.LocalFileSystem) LinearShardSpec(io.druid.timeline.partition.LinearShardSpec) HdfsDataSegmentPusherConfig(io.druid.storage.hdfs.HdfsDataSegmentPusherConfig) HdfsDataSegmentPusher(io.druid.storage.hdfs.HdfsDataSegmentPusher) DataSegment(io.druid.timeline.DataSegment) File(java.io.File) Interval(org.joda.time.Interval) Test(org.junit.Test)
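
The createSegment(...) helper used above belongs to the test class and is not shown on this page. A plausible minimal reconstruction using DataSegment.builder() is sketched below; the real helper in TestDruidStorageHandler may set additional fields, and the loadSpec shown here is an assumption.

import com.google.common.collect.ImmutableMap;
import io.druid.timeline.DataSegment;
import io.druid.timeline.partition.ShardSpec;
import org.joda.time.Interval;

// Hypothetical reconstruction of the test helper, not the original implementation.
private DataSegment createSegment(String path, Interval interval, String version, ShardSpec shardSpec) {
    return DataSegment.builder()
            .dataSource(DATA_SOURCE_NAME)
            .interval(interval)
            .version(version)
            .shardSpec(shardSpec)
            // Assumed loadSpec pointing at the index file; the real test may differ.
            .loadSpec(ImmutableMap.<String, Object>of("type", "local", "path", path))
            .size(0)
            .build();
}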

Example 5 with MetadataStorageTablesConfig

Use of io.druid.metadata.MetadataStorageTablesConfig in project hive by apache.

From class TestDruidStorageHandler, method testCommitInsertIntoWhenDestinationSegmentFileExist:

@Test
public void testCommitInsertIntoWhenDestinationSegmentFileExist() throws MetaException, IOException {
    DerbyConnectorTestUtility connector = derbyConnectorRule.getConnector();
    MetadataStorageTablesConfig metadataStorageTablesConfig = derbyConnectorRule.metadataTablesConfigSupplier().get();
    druidStorageHandler.preCreateTable(tableMock);
    LocalFileSystem localFileSystem = FileSystem.getLocal(config);
    Path taskDirPath = new Path(tableWorkingPath, druidStorageHandler.makeStagingName());
    List<DataSegment> existingSegments = Arrays.asList(createSegment(new Path(taskDirPath, "index_old.zip").toString(), new Interval(100, 150, DateTimeZone.UTC), "v0", new LinearShardSpec(1)));
    HdfsDataSegmentPusherConfig pusherConfig = new HdfsDataSegmentPusherConfig();
    pusherConfig.setStorageDirectory(config.get(String.valueOf(HiveConf.ConfVars.DRUID_SEGMENT_DIRECTORY)));
    DataSegmentPusher dataSegmentPusher = new HdfsDataSegmentPusher(pusherConfig, config, DruidStorageHandlerUtils.JSON_MAPPER);
    DruidStorageHandlerUtils.publishSegmentsAndCommit(connector, metadataStorageTablesConfig, DATA_SOURCE_NAME, existingSegments, true, config, dataSegmentPusher);
    DataSegment dataSegment = createSegment(new Path(taskDirPath, "index.zip").toString(), new Interval(100, 150, DateTimeZone.UTC), "v1", new LinearShardSpec(0));
    Path descriptorPath = DruidStorageHandlerUtils.makeSegmentDescriptorOutputPath(dataSegment, new Path(taskDirPath, DruidStorageHandler.SEGMENTS_DESCRIPTOR_DIR_NAME));
    DruidStorageHandlerUtils.writeSegmentDescriptor(localFileSystem, dataSegment, descriptorPath);
    // Create a conflicting segment file at the destination location
    DataSegment segment = createSegment(new Path(taskDirPath, "index_conflict.zip").toString(), new Interval(100, 150, DateTimeZone.UTC), "v1", new LinearShardSpec(1));
    Path segmentPath = new Path(dataSegmentPusher.getPathForHadoop(), dataSegmentPusher.makeIndexPathName(segment, DruidStorageHandlerUtils.INDEX_ZIP));
    FileUtils.writeStringToFile(new File(segmentPath.toUri()), "dummy");
    druidStorageHandler.commitInsertTable(tableMock, false);
    Assert.assertArrayEquals(Lists.newArrayList(DATA_SOURCE_NAME).toArray(), Lists.newArrayList(DruidStorageHandlerUtils.getAllDataSourceNames(connector, metadataStorageTablesConfig)).toArray());
    final List<DataSegment> dataSegmentList = getUsedSegmentsList(connector, metadataStorageTablesConfig);
    Assert.assertEquals(2, dataSegmentList.size());
    DataSegment persistedSegment = dataSegmentList.get(1);
    // Insert into appends to old version
    Assert.assertEquals("v0", persistedSegment.getVersion());
    Assert.assertTrue(persistedSegment.getShardSpec() instanceof LinearShardSpec);
    // insert into should skip and increment the partition number to 2
    Assert.assertEquals(2, persistedSegment.getShardSpec().getPartitionNum());
    Path expectedFinalHadoopPath = new Path(dataSegmentPusher.getPathForHadoop(), dataSegmentPusher.makeIndexPathName(persistedSegment, DruidStorageHandlerUtils.INDEX_ZIP));
    Assert.assertEquals(ImmutableMap.of("type", "hdfs", "path", expectedFinalHadoopPath.toString()), persistedSegment.getLoadSpec());
    Assert.assertEquals("dummySegmentData", FileUtils.readFileToString(new File(expectedFinalHadoopPath.toUri())));
}
Also used : Path(org.apache.hadoop.fs.Path) MetadataStorageTablesConfig(io.druid.metadata.MetadataStorageTablesConfig) HdfsDataSegmentPusher(io.druid.storage.hdfs.HdfsDataSegmentPusher) DataSegmentPusher(io.druid.segment.loading.DataSegmentPusher) LocalFileSystem(org.apache.hadoop.fs.LocalFileSystem) LinearShardSpec(io.druid.timeline.partition.LinearShardSpec) HdfsDataSegmentPusherConfig(io.druid.storage.hdfs.HdfsDataSegmentPusherConfig) DataSegment(io.druid.timeline.DataSegment) HdfsDataSegmentPusher(io.druid.storage.hdfs.HdfsDataSegmentPusher) File(java.io.File) Interval(org.joda.time.Interval) Test(org.junit.Test)

Aggregations

MetadataStorageTablesConfig (io.druid.metadata.MetadataStorageTablesConfig): 10
DataSegmentPusher (io.druid.segment.loading.DataSegmentPusher): 8
HdfsDataSegmentPusher (io.druid.storage.hdfs.HdfsDataSegmentPusher): 8
HdfsDataSegmentPusherConfig (io.druid.storage.hdfs.HdfsDataSegmentPusherConfig): 8
DataSegment (io.druid.timeline.DataSegment): 8
LinearShardSpec (io.druid.timeline.partition.LinearShardSpec): 8
Path (org.apache.hadoop.fs.Path): 8
Interval (org.joda.time.Interval): 8
Test (org.junit.Test): 8
LocalFileSystem (org.apache.hadoop.fs.LocalFileSystem): 7
File (java.io.File): 4
ObjectMapper (com.fasterxml.jackson.databind.ObjectMapper): 2
DefaultObjectMapper (io.druid.jackson.DefaultObjectMapper): 2
InjectableValues (com.fasterxml.jackson.databind.InjectableValues): 1
NamedType (com.fasterxml.jackson.databind.jsontype.NamedType): 1
SmileFactory (com.fasterxml.jackson.dataformat.smile.SmileFactory): 1
Throwables (com.google.common.base.Throwables): 1
ImmutableList (com.google.common.collect.ImmutableList): 1
ImmutableMap (com.google.common.collect.ImmutableMap): 1
Interner (com.google.common.collect.Interner): 1