Search in sources:

Example 1 with DataSourceMetadata

Use of org.apache.druid.indexing.overlord.DataSourceMetadata in project druid by druid-io.

The class IndexerSQLMetadataStorageCoordinator, method updateDataSourceMetadataWithHandle.

/**
 * Compare-and-swap dataSource metadata in a transaction. This will only modify dataSource metadata if it matches
 * startMetadata when this function is called (based on {@link DataSourceMetadata#matches(DataSourceMetadata)}).
 * This method is idempotent in that if the metadata already incorporates newCommitMetadata, it will return SUCCESS.
 *
 * @param handle        database handle
 * @param dataSource    druid dataSource
 * @param startMetadata dataSource metadata pre-insert must match this startMetadata according to
 *                      {@link DataSourceMetadata#matches(DataSourceMetadata)}
 * @param endMetadata   dataSource metadata post-insert will have this endMetadata merged in with
 *                      {@link DataSourceMetadata#plus(DataSourceMetadata)}
 *
 * @return SUCCESS if dataSource metadata was updated from matching startMetadata to matching endMetadata; FAILURE if
 * the existing metadata did not match startMetadata; TRY_AGAIN if a concurrent update won the compare-and-swap. In
 * the non-SUCCESS cases the metadata was definitely not updated. This guarantee is meant to help
 * {@link #announceHistoricalSegments(Set, Set, DataSourceMetadata, DataSourceMetadata)}
 * achieve its own guarantee.
 *
 * @throws RuntimeException if state is unknown after this call
 */
protected DataStoreMetadataUpdateResult updateDataSourceMetadataWithHandle(
    final Handle handle,
    final String dataSource,
    final DataSourceMetadata startMetadata,
    final DataSourceMetadata endMetadata
) throws IOException
{
    Preconditions.checkNotNull(dataSource, "dataSource");
    Preconditions.checkNotNull(startMetadata, "startMetadata");
    Preconditions.checkNotNull(endMetadata, "endMetadata");
    final byte[] oldCommitMetadataBytesFromDb = retrieveDataSourceMetadataWithHandleAsBytes(handle, dataSource);
    final String oldCommitMetadataSha1FromDb;
    final DataSourceMetadata oldCommitMetadataFromDb;
    if (oldCommitMetadataBytesFromDb == null) {
        oldCommitMetadataSha1FromDb = null;
        oldCommitMetadataFromDb = null;
    } else {
        oldCommitMetadataSha1FromDb = BaseEncoding.base16().encode(Hashing.sha1().hashBytes(oldCommitMetadataBytesFromDb).asBytes());
        oldCommitMetadataFromDb = jsonMapper.readValue(oldCommitMetadataBytesFromDb, DataSourceMetadata.class);
    }
    final boolean startMetadataMatchesExisting;
    if (oldCommitMetadataFromDb == null) {
        startMetadataMatchesExisting = startMetadata.isValidStart();
    } else {
        // Check against the last committed metadata.
        // Convert the last commit into start metadata for the check, since only metadata of the same kind can be matched.
        // Even though the kafka/kinesis indexing services use different sequenceNumber types for representing
        // start and end sequenceNumbers, this conversion is fine because the new start sequenceNumbers are expected
        // to equal the end sequenceNumbers of the last commit.
        startMetadataMatchesExisting = startMetadata.asStartMetadata().matches(oldCommitMetadataFromDb.asStartMetadata());
    }
    if (!startMetadataMatchesExisting) {
        // Not in the desired start state.
        log.error("Not updating metadata, existing state[%s] in metadata store doesn't match to the new start state[%s].", oldCommitMetadataFromDb, startMetadata);
        return DataStoreMetadataUpdateResult.FAILURE;
    }
    // Only the end offsets should be stored in the metadata store
    final DataSourceMetadata newCommitMetadata = oldCommitMetadataFromDb == null ? endMetadata : oldCommitMetadataFromDb.plus(endMetadata);
    final byte[] newCommitMetadataBytes = jsonMapper.writeValueAsBytes(newCommitMetadata);
    final String newCommitMetadataSha1 = BaseEncoding.base16().encode(Hashing.sha1().hashBytes(newCommitMetadataBytes).asBytes());
    final DataStoreMetadataUpdateResult retVal;
    if (oldCommitMetadataBytesFromDb == null) {
        // SELECT -> INSERT can fail due to races; callers must be prepared to retry.
        final int numRows = handle.createStatement(StringUtils.format("INSERT INTO %s (dataSource, created_date, commit_metadata_payload, commit_metadata_sha1) " + "VALUES (:dataSource, :created_date, :commit_metadata_payload, :commit_metadata_sha1)", dbTables.getDataSourceTable())).bind("dataSource", dataSource).bind("created_date", DateTimes.nowUtc().toString()).bind("commit_metadata_payload", newCommitMetadataBytes).bind("commit_metadata_sha1", newCommitMetadataSha1).execute();
        retVal = numRows == 1 ? DataStoreMetadataUpdateResult.SUCCESS : DataStoreMetadataUpdateResult.TRY_AGAIN;
    } else {
        // Expecting a particular old metadata; use the SHA1 in a compare-and-swap UPDATE
        final int numRows = handle.createStatement(StringUtils.format("UPDATE %s SET " + "commit_metadata_payload = :new_commit_metadata_payload, " + "commit_metadata_sha1 = :new_commit_metadata_sha1 " + "WHERE dataSource = :dataSource AND commit_metadata_sha1 = :old_commit_metadata_sha1", dbTables.getDataSourceTable())).bind("dataSource", dataSource).bind("old_commit_metadata_sha1", oldCommitMetadataSha1FromDb).bind("new_commit_metadata_payload", newCommitMetadataBytes).bind("new_commit_metadata_sha1", newCommitMetadataSha1).execute();
        retVal = numRows == 1 ? DataStoreMetadataUpdateResult.SUCCESS : DataStoreMetadataUpdateResult.TRY_AGAIN;
    }
    if (retVal == DataStoreMetadataUpdateResult.SUCCESS) {
        log.info("Updated metadata from[%s] to[%s].", oldCommitMetadataFromDb, newCommitMetadata);
    } else {
        log.info("Not updating metadata, compare-and-swap failure.");
    }
    return retVal;
}
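
The TRY_AGAIN result signals a lost compare-and-swap race rather than a hard failure, so callers are expected to re-read and retry, while FAILURE (a start metadata mismatch) should not be retried blindly. A minimal caller-side sketch of that policy, assuming a hypothetical attempt supplier that runs one transaction around updateDataSourceMetadataWithHandle:

import java.util.function.Supplier;

// Mirrors the three outcomes of updateDataSourceMetadataWithHandle.
enum DataStoreMetadataUpdateResult { SUCCESS, FAILURE, TRY_AGAIN }

final class MetadataUpdateRetrier {

    // Hypothetical helper: retries only on TRY_AGAIN (a CAS race), never on
    // FAILURE (the stored metadata did not match startMetadata).
    static DataStoreMetadataUpdateResult updateWithRetries(Supplier<DataStoreMetadataUpdateResult> attempt, int maxTries) {
        DataStoreMetadataUpdateResult result = DataStoreMetadataUpdateResult.TRY_AGAIN;
        for (int i = 0; i < maxTries && result == DataStoreMetadataUpdateResult.TRY_AGAIN; i++) {
            result = attempt.get();
        }
        return result;
    }
}
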
Also used : DataSourceMetadata(org.apache.druid.indexing.overlord.DataSourceMetadata)

Example 2 with DataSourceMetadata

Use of org.apache.druid.indexing.overlord.DataSourceMetadata in project druid by druid-io.

The class SeekableStreamSupervisor, method resetInternal.

@VisibleForTesting
public void resetInternal(DataSourceMetadata dataSourceMetadata) {
    if (dataSourceMetadata == null) {
        // Reset everything
        boolean result = indexerMetadataStorageCoordinator.deleteDataSourceMetadata(dataSource);
        log.info("Reset dataSource[%s] - dataSource metadata entry deleted? [%s]", dataSource, result);
        activelyReadingTaskGroups.values().forEach(group -> killTasksInGroup(group, "DataSourceMetadata was not found during reset"));
        activelyReadingTaskGroups.clear();
        partitionGroups.clear();
        partitionOffsets.clear();
    } else {
        if (!checkSourceMetadataMatch(dataSourceMetadata)) {
            throw new IAE("Datasource metadata instance does not match required, found instance of [%s]", dataSourceMetadata.getClass());
        }
        log.info("Reset dataSource[%s] with metadata[%s]", dataSource, dataSourceMetadata);
        // Reset only the partitions in dataSourceMetadata if it has not been reset yet
        @SuppressWarnings("unchecked") final SeekableStreamDataSourceMetadata<PartitionIdType, SequenceOffsetType> resetMetadata = (SeekableStreamDataSourceMetadata<PartitionIdType, SequenceOffsetType>) dataSourceMetadata;
        if (resetMetadata.getSeekableStreamSequenceNumbers().getStream().equals(ioConfig.getStream())) {
            // metadata can be null
            final DataSourceMetadata metadata = indexerMetadataStorageCoordinator.retrieveDataSourceMetadata(dataSource);
            if (metadata != null && !checkSourceMetadataMatch(metadata)) {
                throw new IAE("Datasource metadata instance does not match required, found instance of [%s]", metadata.getClass());
            }
            @SuppressWarnings("unchecked") final SeekableStreamDataSourceMetadata<PartitionIdType, SequenceOffsetType> currentMetadata = (SeekableStreamDataSourceMetadata<PartitionIdType, SequenceOffsetType>) metadata;
            // defend against consecutive reset requests from replicas
            // as well as the case where the metadata store does not have an entry for the reset partitions
            boolean doReset = false;
            for (Entry<PartitionIdType, SequenceOffsetType> resetPartitionOffset : resetMetadata.getSeekableStreamSequenceNumbers().getPartitionSequenceNumberMap().entrySet()) {
                final SequenceOffsetType partitionOffsetInMetadataStore = currentMetadata == null ? null : currentMetadata.getSeekableStreamSequenceNumbers().getPartitionSequenceNumberMap().get(resetPartitionOffset.getKey());
                final TaskGroup partitionTaskGroup = activelyReadingTaskGroups.get(getTaskGroupIdForPartition(resetPartitionOffset.getKey()));
                final boolean isSameOffset = partitionTaskGroup != null && resetPartitionOffset.getValue().equals(partitionTaskGroup.startingSequences.get(resetPartitionOffset.getKey()));
                if (partitionOffsetInMetadataStore != null || isSameOffset) {
                    doReset = true;
                    break;
                }
            }
            if (!doReset) {
                log.info("Ignoring duplicate reset request [%s]", dataSourceMetadata);
                return;
            }
            boolean metadataUpdateSuccess;
            if (currentMetadata == null) {
                metadataUpdateSuccess = true;
            } else {
                final DataSourceMetadata newMetadata = currentMetadata.minus(resetMetadata);
                try {
                    metadataUpdateSuccess = indexerMetadataStorageCoordinator.resetDataSourceMetadata(dataSource, newMetadata);
                } catch (IOException e) {
                    log.error("Resetting DataSourceMetadata failed [%s]", e.getMessage());
                    throw new RuntimeException(e);
                }
            }
            if (metadataUpdateSuccess) {
                resetMetadata.getSeekableStreamSequenceNumbers().getPartitionSequenceNumberMap().keySet().forEach(partition -> {
                    final int groupId = getTaskGroupIdForPartition(partition);
                    killTaskGroupForPartitions(ImmutableSet.of(partition), "DataSourceMetadata was updated during reset");
                    activelyReadingTaskGroups.remove(groupId);
                    // killTaskGroupForPartitions() cleans up partitionGroups.
                    // Add the removed groups back.
                    partitionGroups.computeIfAbsent(groupId, k -> new HashSet<>());
                    partitionOffsets.put(partition, getNotSetMarker());
                });
            } else {
                throw new ISE("Unable to reset metadata");
            }
        } else {
            log.warn("Reset metadata stream [%s] and supervisor's stream name [%s] do not match", resetMetadata.getSeekableStreamSequenceNumbers().getStream(), ioConfig.getStream());
        }
    }
}
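
The doReset guard above proceeds only when at least one requested partition is still "known": either the metadata store holds an offset for it, or an actively reading task group started from exactly the requested offset. A standalone sketch of that decision, with hypothetical maps standing in for the supervisor's state:

import java.util.Map;

final class ResetGuard {

    // Returns true if any partition in the reset request still has an entry in
    // the metadata store, or matches a task group's starting offset. All three
    // maps are hypothetical stand-ins for the supervisor's internal state.
    static <P, S> boolean shouldReset(Map<P, S> requestedOffsets, Map<P, S> storedOffsets, Map<P, S> taskStartingOffsets) {
        for (Map.Entry<P, S> entry : requestedOffsets.entrySet()) {
            final S stored = storedOffsets.get(entry.getKey());
            final S taskStart = taskStartingOffsets.get(entry.getKey());
            if (stored != null || entry.getValue().equals(taskStart)) {
                return true;
            }
        }
        return false; // duplicate or stale reset request: ignore it
    }
}
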
Also used : IOException(java.io.IOException) IAE(org.apache.druid.java.util.common.IAE) DataSourceMetadata(org.apache.druid.indexing.overlord.DataSourceMetadata) SeekableStreamDataSourceMetadata(org.apache.druid.indexing.seekablestream.SeekableStreamDataSourceMetadata) ISE(org.apache.druid.java.util.common.ISE) VisibleForTesting(com.google.common.annotations.VisibleForTesting)

Example 3 with DataSourceMetadata

Use of org.apache.druid.indexing.overlord.DataSourceMetadata in project druid by druid-io.

The class AppenderatorDriverRealtimeIndexTaskTest, method makeToolboxFactory.

private void makeToolboxFactory(final File directory) {
    taskStorage = new HeapMemoryTaskStorage(new TaskStorageConfig(null));
    publishedSegments = new CopyOnWriteArrayList<>();
    ObjectMapper mapper = new DefaultObjectMapper();
    mapper.registerSubtypes(LinearShardSpec.class);
    mapper.registerSubtypes(NumberedShardSpec.class);
    IndexerSQLMetadataStorageCoordinator mdc = new IndexerSQLMetadataStorageCoordinator(mapper, derbyConnectorRule.metadataTablesConfigSupplier().get(), derbyConnectorRule.getConnector()) {

        @Override
        public Set<DataSegment> announceHistoricalSegments(Set<DataSegment> segments) throws IOException {
            Set<DataSegment> result = super.announceHistoricalSegments(segments);
            Assert.assertFalse("Segment latch not initialized, did you forget to call expectPublishSegments?", segmentLatch == null);
            publishedSegments.addAll(result);
            segments.forEach(s -> segmentLatch.countDown());
            return result;
        }

        @Override
        public SegmentPublishResult announceHistoricalSegments(Set<DataSegment> segments, Set<DataSegment> segmentsToDrop, DataSourceMetadata startMetadata, DataSourceMetadata endMetadata) throws IOException {
            SegmentPublishResult result = super.announceHistoricalSegments(segments, segmentsToDrop, startMetadata, endMetadata);
            Assert.assertFalse("Segment latch not initialized, did you forget to call expectPublishSegments?", segmentLatch == null);
            publishedSegments.addAll(result.getSegments());
            result.getSegments().forEach(s -> segmentLatch.countDown());
            return result;
        }
    };
    taskLockbox = new TaskLockbox(taskStorage, mdc);
    final TaskConfig taskConfig = new TaskConfig(directory.getPath(), null, null, 50000, null, true, null, null, null, false, false, TaskConfig.BATCH_PROCESSING_MODE_DEFAULT.name());
    final TaskActionToolbox taskActionToolbox = new TaskActionToolbox(taskLockbox, taskStorage, mdc, EMITTER, EasyMock.createMock(SupervisorManager.class));
    final TaskActionClientFactory taskActionClientFactory = new LocalTaskActionClientFactory(taskStorage, taskActionToolbox, new TaskAuditLogConfig(false));
    final QueryRunnerFactoryConglomerate conglomerate = new DefaultQueryRunnerFactoryConglomerate(
        ImmutableMap.of(
            TimeseriesQuery.class,
            new TimeseriesQueryRunnerFactory(
                new TimeseriesQueryQueryToolChest(),
                new TimeseriesQueryEngine(),
                (query, future) -> {
                    // do nothing
                }
            )
        )
    );
    handOffCallbacks = new ConcurrentHashMap<>();
    final SegmentHandoffNotifierFactory handoffNotifierFactory = dataSource -> new SegmentHandoffNotifier() {

        @Override
        public boolean registerSegmentHandoffCallback(SegmentDescriptor descriptor, Executor exec, Runnable handOffRunnable) {
            handOffCallbacks.put(descriptor, new Pair<>(exec, handOffRunnable));
            handoffLatch.countDown();
            return true;
        }

        @Override
        public void start() {
        // Noop
        }

        @Override
        public void close() {
        // Noop
        }
    };
    final TestUtils testUtils = new TestUtils();
    taskToolboxFactory = new TaskToolboxFactory(
        taskConfig,
        new DruidNode("druid/middlemanager", "localhost", false, 8091, null, true, false),
        taskActionClientFactory,
        EMITTER,
        new TestDataSegmentPusher(),
        new TestDataSegmentKiller(),
        null, // DataSegmentMover
        null, // DataSegmentArchiver
        new TestDataSegmentAnnouncer(),
        EasyMock.createNiceMock(DataSegmentServerAnnouncer.class),
        handoffNotifierFactory,
        () -> conglomerate,
        DirectQueryProcessingPool.INSTANCE, // queryExecutorService
        NoopJoinableFactory.INSTANCE,
        () -> EasyMock.createMock(MonitorScheduler.class),
        new SegmentCacheManagerFactory(testUtils.getTestObjectMapper()),
        testUtils.getTestObjectMapper(),
        testUtils.getTestIndexIO(),
        MapCache.create(1024),
        new CacheConfig(),
        new CachePopulatorStats(),
        testUtils.getTestIndexMergerV9(),
        EasyMock.createNiceMock(DruidNodeAnnouncer.class),
        EasyMock.createNiceMock(DruidNode.class),
        new LookupNodeService("tier"),
        new DataNodeService("tier", 1000, ServerType.INDEXER_EXECUTOR, 0),
        new SingleFileTaskReportFileWriter(reportsFile),
        null,
        AuthTestUtils.TEST_AUTHORIZER_MAPPER,
        new NoopChatHandlerProvider(),
        testUtils.getRowIngestionMetersFactory(),
        new TestAppenderatorsManager(),
        new NoopIndexingServiceClient(),
        null,
        null,
        null
    );
}
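
Both overrides above count down segmentLatch once per published segment, which is how the test blocks until the expected number of publications has happened. A minimal sketch of that synchronization pattern, with a hypothetical expected count of 2:

import java.util.concurrent.CountDownLatch;
import java.util.concurrent.TimeUnit;

final class PublishLatchDemo {

    public static void main(String[] args) throws InterruptedException {
        // Hypothetical stand-in for expectPublishSegments(2).
        final CountDownLatch segmentLatch = new CountDownLatch(2);

        // Each published segment counts the latch down, as in the overrides above.
        segmentLatch.countDown();
        segmentLatch.countDown();

        // The test thread blocks until all expected segments are published (or times out).
        final boolean published = segmentLatch.await(5, TimeUnit.SECONDS);
        System.out.println("all segments published: " + published);
    }
}
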
Also used : DataSourceMetadata(org.apache.druid.indexing.overlord.DataSourceMetadata) SegmentPublishResult(org.apache.druid.indexing.overlord.SegmentPublishResult) IndexerSQLMetadataStorageCoordinator(org.apache.druid.metadata.IndexerSQLMetadataStorageCoordinator) TaskToolboxFactory(org.apache.druid.indexing.common.TaskToolboxFactory) TaskLockbox(org.apache.druid.indexing.overlord.TaskLockbox) SegmentHandoffNotifierFactory(org.apache.druid.segment.handoff.SegmentHandoffNotifierFactory) DataSegment(org.apache.druid.timeline.DataSegment)

Example 4 with DataSourceMetadata

Use of org.apache.druid.indexing.overlord.DataSourceMetadata in project druid by druid-io.

The class DerivativeDataSourceManager, method updateDerivatives.

private void updateDerivatives() {
    List<Pair<String, DerivativeDataSourceMetadata>> derivativesInDatabase = connector.retryWithHandle(handle -> handle.createQuery(StringUtils.format("SELECT DISTINCT dataSource,commit_metadata_payload FROM %1$s", dbTables.get().getDataSourceTable())).map((int index, ResultSet r, StatementContext ctx) -> {
        String datasourceName = r.getString("dataSource");
        DataSourceMetadata payload = JacksonUtils.readValue(objectMapper, r.getBytes("commit_metadata_payload"), DataSourceMetadata.class);
        if (!(payload instanceof DerivativeDataSourceMetadata)) {
            return null;
        }
        DerivativeDataSourceMetadata metadata = (DerivativeDataSourceMetadata) payload;
        return new Pair<>(datasourceName, metadata);
    }).list());
    List<DerivativeDataSource> derivativeDataSources = derivativesInDatabase.parallelStream().filter(data -> data != null).map(derivatives -> {
        String name = derivatives.lhs;
        DerivativeDataSourceMetadata metadata = derivatives.rhs;
        String baseDataSource = metadata.getBaseDataSource();
        long avgSizePerGranularity = getAvgSizePerGranularity(name);
        log.info("find derivatives: {bases=%s, derivative=%s, dimensions=%s, metrics=%s, avgSize=%s}", baseDataSource, name, metadata.getDimensions(), metadata.getMetrics(), avgSizePerGranularity);
        return new DerivativeDataSource(name, baseDataSource, metadata.getColumns(), avgSizePerGranularity);
    }).filter(derivatives -> derivatives.getAvgSizeBasedGranularity() > 0).collect(Collectors.toList());
    ConcurrentHashMap<String, SortedSet<DerivativeDataSource>> newDerivatives = new ConcurrentHashMap<>();
    for (DerivativeDataSource derivative : derivativeDataSources) {
        newDerivatives.computeIfAbsent(derivative.getBaseDataSource(), ds -> new TreeSet<>()).add(derivative);
    }
    ConcurrentHashMap<String, SortedSet<DerivativeDataSource>> current;
    do {
        current = DERIVATIVES_REF.get();
    } while (!DERIVATIVES_REF.compareAndSet(current, newDerivatives));
}
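
The closing do/while publishes the rebuilt map through an AtomicReference compare-and-set loop. Here the new value does not depend on the old one, so a plain set would suffice, but the CAS form generalizes to read-modify-write updates where concurrent writers must not lose each other's changes. A minimal sketch of that general pattern:

import java.util.HashMap;
import java.util.Map;
import java.util.concurrent.atomic.AtomicReference;

final class CasPublishDemo {

    private static final AtomicReference<Map<String, Integer>> REF = new AtomicReference<>(new HashMap<>());

    // Read-modify-write: copy the current map, change the copy, and retry the
    // swap until no other thread has replaced REF in the meantime.
    static void increment(String key) {
        Map<String, Integer> current;
        Map<String, Integer> updated;
        do {
            current = REF.get();
            updated = new HashMap<>(current); // never mutate the published map
            updated.merge(key, 1, Integer::sum);
        } while (!REF.compareAndSet(current, updated));
    }
}
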
Also used : MoreExecutors(com.google.common.util.concurrent.MoreExecutors) ListenableFuture(com.google.common.util.concurrent.ListenableFuture) SortedSet(java.util.SortedSet) Intervals(org.apache.druid.java.util.common.Intervals) Inject(com.google.inject.Inject) Supplier(com.google.common.base.Supplier) Duration(org.joda.time.Duration) LifecycleStart(org.apache.druid.java.util.common.lifecycle.LifecycleStart) AtomicReference(java.util.concurrent.atomic.AtomicReference) StatementContext(org.skife.jdbi.v2.StatementContext) TreeSet(java.util.TreeSet) Pair(org.apache.druid.java.util.common.Pair) HashSet(java.util.HashSet) Interval(org.joda.time.Interval) DataSourceMetadata(org.apache.druid.indexing.overlord.DataSourceMetadata) LifecycleStop(org.apache.druid.java.util.common.lifecycle.LifecycleStop) ResultSet(java.sql.ResultSet) ManageLifecycle(org.apache.druid.guice.ManageLifecycle) ListeningScheduledExecutorService(com.google.common.util.concurrent.ListeningScheduledExecutorService) DateTimes(org.apache.druid.java.util.common.DateTimes) SQLMetadataConnector(org.apache.druid.metadata.SQLMetadataConnector) ImmutableSet(com.google.common.collect.ImmutableSet) Execs(org.apache.druid.java.util.common.concurrent.Execs) JacksonUtils(org.apache.druid.java.util.common.jackson.JacksonUtils) EmittingLogger(org.apache.druid.java.util.emitter.EmittingLogger) MetadataStorageTablesConfig(org.apache.druid.metadata.MetadataStorageTablesConfig) ImmutableMap(com.google.common.collect.ImmutableMap) HandleCallback(org.skife.jdbi.v2.tweak.HandleCallback) ObjectMapper(com.fasterxml.jackson.databind.ObjectMapper) ConcurrentHashMap(java.util.concurrent.ConcurrentHashMap) StringUtils(org.apache.druid.java.util.common.StringUtils) Set(java.util.Set) Collectors(java.util.stream.Collectors) TimeUnit(java.util.concurrent.TimeUnit) List(java.util.List) Handle(org.skife.jdbi.v2.Handle) DerivativeDataSourceMetadata(org.apache.druid.indexing.materializedview.DerivativeDataSourceMetadata) DataSegment(org.apache.druid.timeline.DataSegment)

Example 5 with DataSourceMetadata

Use of org.apache.druid.indexing.overlord.DataSourceMetadata in project druid by druid-io.

The class KinesisIndexTaskTest, method makeToolboxFactory.

private void makeToolboxFactory() throws IOException {
    directory = tempFolder.newFolder();
    final TestUtils testUtils = new TestUtils();
    final ObjectMapper objectMapper = testUtils.getTestObjectMapper();
    objectMapper.setInjectableValues(((InjectableValues.Std) objectMapper.getInjectableValues()).addValue(AWSCredentialsConfig.class, new AWSCredentialsConfig()));
    for (Module module : new KinesisIndexingServiceModule().getJacksonModules()) {
        objectMapper.registerModule(module);
    }
    final TaskConfig taskConfig = new TaskConfig(new File(directory, "baseDir").getPath(), new File(directory, "baseTaskDir").getPath(), null, 50000, null, true, null, null, null, false, false, TaskConfig.BATCH_PROCESSING_MODE_DEFAULT.name());
    final TestDerbyConnector derbyConnector = derby.getConnector();
    derbyConnector.createDataSourceTable();
    derbyConnector.createPendingSegmentsTable();
    derbyConnector.createSegmentTable();
    derbyConnector.createRulesTable();
    derbyConnector.createConfigTable();
    derbyConnector.createTaskTables();
    derbyConnector.createAuditTable();
    taskStorage = new MetadataTaskStorage(derbyConnector, new TaskStorageConfig(null), new DerbyMetadataStorageActionHandlerFactory(derbyConnector, derby.metadataTablesConfigSupplier().get(), objectMapper));
    metadataStorageCoordinator = new IndexerSQLMetadataStorageCoordinator(testUtils.getTestObjectMapper(), derby.metadataTablesConfigSupplier().get(), derbyConnector);
    taskLockbox = new TaskLockbox(taskStorage, metadataStorageCoordinator);
    final TaskActionToolbox taskActionToolbox = new TaskActionToolbox(taskLockbox, taskStorage, metadataStorageCoordinator, emitter, new SupervisorManager(null) {

        @Override
        public boolean checkPointDataSourceMetadata(String supervisorId, int taskGroupId, @Nullable DataSourceMetadata checkpointMetadata) {
            LOG.info("Adding checkpoint hash to the set");
            checkpointRequestsHash.add(Objects.hash(supervisorId, taskGroupId, checkpointMetadata));
            return true;
        }
    });
    final TaskActionClientFactory taskActionClientFactory = new LocalTaskActionClientFactory(taskStorage, taskActionToolbox, new TaskAuditLogConfig(false));
    final SegmentHandoffNotifierFactory handoffNotifierFactory = dataSource -> new SegmentHandoffNotifier() {

        @Override
        public boolean registerSegmentHandoffCallback(SegmentDescriptor descriptor, Executor exec, Runnable handOffRunnable) {
            if (doHandoff) {
                // Simulate immediate handoff
                exec.execute(handOffRunnable);
            }
            return true;
        }

        @Override
        public void start() {
        // Noop
        }

        @Override
        public void close() {
        // Noop
        }
    };
    final LocalDataSegmentPusherConfig dataSegmentPusherConfig = new LocalDataSegmentPusherConfig();
    dataSegmentPusherConfig.storageDirectory = getSegmentDirectory();
    final DataSegmentPusher dataSegmentPusher = new LocalDataSegmentPusher(dataSegmentPusherConfig);
    toolboxFactory = new TaskToolboxFactory(
        taskConfig,
        null, // taskExecutorNode
        taskActionClientFactory,
        emitter,
        dataSegmentPusher,
        new TestDataSegmentKiller(),
        null, // DataSegmentMover
        null, // DataSegmentArchiver
        new TestDataSegmentAnnouncer(),
        EasyMock.createNiceMock(DataSegmentServerAnnouncer.class),
        handoffNotifierFactory,
        this::makeTimeseriesOnlyConglomerate,
        DirectQueryProcessingPool.INSTANCE,
        NoopJoinableFactory.INSTANCE,
        () -> EasyMock.createMock(MonitorScheduler.class),
        new SegmentCacheManagerFactory(testUtils.getTestObjectMapper()),
        testUtils.getTestObjectMapper(),
        testUtils.getTestIndexIO(),
        MapCache.create(1024),
        new CacheConfig(),
        new CachePopulatorStats(),
        testUtils.getTestIndexMergerV9(),
        EasyMock.createNiceMock(DruidNodeAnnouncer.class),
        EasyMock.createNiceMock(DruidNode.class),
        new LookupNodeService("tier"),
        new DataNodeService("tier", 1, ServerType.INDEXER_EXECUTOR, 0),
        new SingleFileTaskReportFileWriter(reportsFile),
        null,
        AuthTestUtils.TEST_AUTHORIZER_MAPPER,
        new NoopChatHandlerProvider(),
        testUtils.getRowIngestionMetersFactory(),
        new TestAppenderatorsManager(),
        new NoopIndexingServiceClient(),
        null,
        null,
        null
    );
}
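
The stubbed SegmentHandoffNotifier above simulates immediate handoff by running the registered callback on the supplied executor, so the task under test never waits for a real coordinator. A minimal sketch of the contract being exercised, using a simplified hypothetical interface and a direct executor:

import java.util.concurrent.Executor;

final class ImmediateHandoffDemo {

    // Simplified, hypothetical version of the SegmentHandoffNotifier contract.
    interface HandoffNotifier {
        boolean registerSegmentHandoffCallback(Executor exec, Runnable onHandoff);
    }

    public static void main(String[] args) {
        // Stub that simulates immediate handoff, like the doHandoff branch above.
        final HandoffNotifier notifier = (exec, onHandoff) -> {
            exec.execute(onHandoff);
            return true;
        };
        notifier.registerSegmentHandoffCallback(
            Runnable::run, // direct executor: runs the callback on the calling thread
            () -> System.out.println("segment handed off; the task can finish publishing")
        );
    }
}
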
Also used : DataSourceMetadata(org.apache.druid.indexing.overlord.DataSourceMetadata) IndexerSQLMetadataStorageCoordinator(org.apache.druid.metadata.IndexerSQLMetadataStorageCoordinator) SupervisorManager(org.apache.druid.indexing.overlord.supervisor.SupervisorManager) TaskToolboxFactory(org.apache.druid.indexing.common.TaskToolboxFactory) MetadataTaskStorage(org.apache.druid.indexing.overlord.MetadataTaskStorage) LocalDataSegmentPusher(org.apache.druid.segment.loading.LocalDataSegmentPusher) SegmentHandoffNotifierFactory(org.apache.druid.segment.handoff.SegmentHandoffNotifierFactory) KinesisSupervisor(org.apache.druid.indexing.kinesis.supervisor.KinesisSupervisor)

Aggregations

DataSourceMetadata (org.apache.druid.indexing.overlord.DataSourceMetadata): 12 usages
Test (org.junit.Test): 6 usages
ObjectMapper (com.fasterxml.jackson.databind.ObjectMapper): 5 usages
ImmutableMap (com.google.common.collect.ImmutableMap): 5 usages
ListenableFuture (com.google.common.util.concurrent.ListenableFuture): 5 usages
MoreExecutors (com.google.common.util.concurrent.MoreExecutors): 5 usages
IOException (java.io.IOException): 5 usages
List (java.util.List): 5 usages
Set (java.util.Set): 5 usages
TimeUnit (java.util.concurrent.TimeUnit): 5 usages
Collectors (java.util.stream.Collectors): 5 usages
DateTimes (org.apache.druid.java.util.common.DateTimes): 5 usages
StringUtils (org.apache.druid.java.util.common.StringUtils): 5 usages
Execs (org.apache.druid.java.util.common.concurrent.Execs): 5 usages
EmittingLogger (org.apache.druid.java.util.emitter.EmittingLogger): 5 usages
ImmutableList (com.google.common.collect.ImmutableList): 4 usages
ImmutableSet (com.google.common.collect.ImmutableSet): 4 usages
HashSet (java.util.HashSet): 4 usages
Map (java.util.Map): 4 usages
Executor (java.util.concurrent.Executor): 4 usages