Example 26 with DruidServerMetadata

use of org.apache.druid.server.coordination.DruidServerMetadata in project druid by druid-io.

the class DruidSchemaConcurrencyTest method testDruidSchemaRefreshAndDruidSchemaGetSegmentMetadata.

/**
 * This tests the contention between 2 methods of DruidSchema, {@link DruidSchema#refresh} and
 * {@link DruidSchema#getSegmentMetadataSnapshot()}. It first triggers refreshing DruidSchema.
 * To mimic heavy work done while holding {@link DruidSchema#lock}, {@link DruidSchema#buildDruidTable} is overridden
 * to sleep before doing real work. While refreshing DruidSchema, getSegmentMetadataSnapshot() is continuously
 * called to mimic reading the segments table of SystemSchema. All these calls must return without heavy contention.
 */
@Test(timeout = 30000L)
public void testDruidSchemaRefreshAndDruidSchemaGetSegmentMetadata() throws InterruptedException, ExecutionException, TimeoutException {
    schema = new DruidSchema(CalciteTests.createMockQueryLifecycleFactory(walker, conglomerate), serverView, segmentManager, new MapJoinableFactory(ImmutableSet.of(), ImmutableMap.of()), PLANNER_CONFIG_DEFAULT, new NoopEscalator(), new BrokerInternalQueryConfig(), null) {

        @Override
        DruidTable buildDruidTable(final String dataSource) {
            doInLock(() -> {
                try {
                    // Mimic some heavy work done while holding the lock in DruidSchema
                    Thread.sleep(5000);
                } catch (InterruptedException e) {
                    throw new RuntimeException(e);
                }
            });
            return super.buildDruidTable(dataSource);
        }
    };
    int numExistingSegments = 100;
    int numServers = 19;
    CountDownLatch segmentLoadLatch = new CountDownLatch(numExistingSegments);
    serverView.registerTimelineCallback(Execs.directExecutor(), new TimelineCallback() {

        @Override
        public CallbackAction timelineInitialized() {
            return CallbackAction.CONTINUE;
        }

        @Override
        public CallbackAction segmentAdded(DruidServerMetadata server, DataSegment segment) {
            segmentLoadLatch.countDown();
            return CallbackAction.CONTINUE;
        }

        @Override
        public CallbackAction segmentRemoved(DataSegment segment) {
            return CallbackAction.CONTINUE;
        }

        @Override
        public CallbackAction serverSegmentRemoved(DruidServerMetadata server, DataSegment segment) {
            return CallbackAction.CONTINUE;
        }
    });
    addSegmentsToCluster(0, numServers, numExistingSegments);
    // Wait for all segments to be loaded in BrokerServerView
    Assert.assertTrue(segmentLoadLatch.await(5, TimeUnit.SECONDS));
    // Trigger refresh of DruidSchema. This will internally run the heavy work mimicked by the overridden buildDruidTable
    Future<?> refreshFuture = exec.submit(() -> {
        schema.refresh(walker.getSegments().stream().map(DataSegment::getId).collect(Collectors.toSet()), Sets.newHashSet(DATASOURCE));
        return null;
    });
    Assert.assertFalse(refreshFuture.isDone());
    for (int i = 0; i < 1000; i++) {
        Map<SegmentId, AvailableSegmentMetadata> segmentsMetadata = exec.submit(() -> schema.getSegmentMetadataSnapshot()).get(100, TimeUnit.MILLISECONDS);
        Assert.assertFalse(segmentsMetadata.isEmpty());
        // We want to call getSegmentMetadataSnapshot while refreshing. Sleep might help with timing.
        Thread.sleep(2);
    }
    refreshFuture.get(10, TimeUnit.SECONDS);
}
Also used : SegmentId(org.apache.druid.timeline.SegmentId) DruidTable(org.apache.druid.sql.calcite.table.DruidTable) DruidServerMetadata(org.apache.druid.server.coordination.DruidServerMetadata) CountDownLatch(java.util.concurrent.CountDownLatch) DataSegment(org.apache.druid.timeline.DataSegment) NoopEscalator(org.apache.druid.server.security.NoopEscalator) CallbackAction(org.apache.druid.client.ServerView.CallbackAction) BrokerInternalQueryConfig(org.apache.druid.client.BrokerInternalQueryConfig) TimelineCallback(org.apache.druid.client.TimelineServerView.TimelineCallback) Future(java.util.concurrent.Future) MapJoinableFactory(org.apache.druid.segment.join.MapJoinableFactory) Test(org.junit.Test)
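
The non-contention property this test checks can be illustrated with plain JDK primitives: a writer sleeps while holding a rebuild lock, readers take a snapshot through a path that never acquires that lock, and every read is bounded by a future timeout. The sketch below is a hypothetical stand-in, not Druid code; SchemaLike, rebuild, and snapshot are invented names that only mirror the shape of DruidSchema#refresh and DruidSchema#getSegmentMetadataSnapshot.

import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;
import java.util.concurrent.TimeUnit;

// Hypothetical stand-in for DruidSchema: rebuild() does slow work under a lock,
// while snapshot() reads a concurrent map and never takes that lock.
class SchemaLike {
    private final Object rebuildLock = new Object();
    private final ConcurrentHashMap<String, Long> rowCounts = new ConcurrentHashMap<>();

    SchemaLike() {
        rowCounts.put("existing-segment", 10L); // seed so readers always see non-empty state
    }

    void rebuild() throws InterruptedException {
        synchronized (rebuildLock) {
            Thread.sleep(5_000); // mimic heavy work done while holding the lock
            rowCounts.put("rebuilt-segment", 42L);
        }
    }

    Map<String, Long> snapshot() {
        return Map.copyOf(rowCounts); // no rebuildLock involved
    }
}

public class ContentionSketch {
    public static void main(String[] args) throws Exception {
        SchemaLike schema = new SchemaLike();
        ExecutorService exec = Executors.newFixedThreadPool(2);

        Future<?> rebuild = exec.submit(() -> {
            schema.rebuild();
            return null;
        });

        // Readers must come back quickly even though rebuild() is sleeping in the lock.
        for (int i = 0; i < 100; i++) {
            Map<String, Long> snap = exec.submit(schema::snapshot).get(100, TimeUnit.MILLISECONDS);
            if (snap.isEmpty()) {
                throw new AssertionError("snapshot should never be empty");
            }
            Thread.sleep(2);
        }
        rebuild.get(10, TimeUnit.SECONDS);
        exec.shutdown();
    }
}

If snapshot() instead synchronized on rebuildLock, every get(100, TimeUnit.MILLISECONDS) would time out while rebuild() sleeps, which is exactly the regression the Druid test guards against.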

Example 27 with DruidServerMetadata

use of org.apache.druid.server.coordination.DruidServerMetadata in project druid by druid-io.

the class DruidSchemaTest method testServerSegmentRemovedCallbackRemoveUnknownSegment.

@Test
public void testServerSegmentRemovedCallbackRemoveUnknownSegment() throws InterruptedException {
    String datasource = "serverSegmentRemoveTest";
    CountDownLatch removeServerSegmentLatch = new CountDownLatch(1);
    DruidSchema schema = new DruidSchema(CalciteTests.createMockQueryLifecycleFactory(walker, conglomerate), serverView, segmentManager, new MapJoinableFactory(ImmutableSet.of(), ImmutableMap.of()), PLANNER_CONFIG_DEFAULT, new NoopEscalator(), new BrokerInternalQueryConfig(), null) {

        @Override
        void removeServerSegment(final DruidServerMetadata server, final DataSegment segment) {
            super.removeServerSegment(server, segment);
            if (datasource.equals(segment.getDataSource())) {
                removeServerSegmentLatch.countDown();
            }
        }
    };
    serverView.addSegment(newSegment(datasource, 1), ServerType.BROKER);
    serverView.removeSegment(newSegment(datasource, 1), ServerType.HISTORICAL);
    Assert.assertTrue(removeServerSegmentLatch.await(1, TimeUnit.SECONDS));
    Assert.assertEquals(4, schema.getTotalSegments());
}
Also used : NoopEscalator(org.apache.druid.server.security.NoopEscalator) BrokerInternalQueryConfig(org.apache.druid.client.BrokerInternalQueryConfig) DruidServerMetadata(org.apache.druid.server.coordination.DruidServerMetadata) CountDownLatch(java.util.concurrent.CountDownLatch) MapJoinableFactory(org.apache.druid.segment.join.MapJoinableFactory) DataSegment(org.apache.druid.timeline.DataSegment) Test(org.junit.Test)
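
The override-plus-latch idiom in this test is worth calling out: the schema under test is subclassed so that an internal callback counts down a CountDownLatch, letting the test await an asynchronous delivery before asserting. Below is a minimal, self-contained sketch of the same idiom; SegmentWatcher and onSegmentRemoved are hypothetical names, not Druid API.

import java.util.concurrent.CountDownLatch;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.TimeUnit;

// Hypothetical event source: delivers segment-removed notifications asynchronously.
class SegmentWatcher {
    private final ExecutorService notifier = Executors.newSingleThreadExecutor();

    void removeSegment(String dataSource, String segmentId) {
        notifier.submit(() -> onSegmentRemoved(dataSource, segmentId));
    }

    // Production logic would update caches here; tests hook this method.
    protected void onSegmentRemoved(String dataSource, String segmentId) {
    }

    void shutdown() {
        notifier.shutdown();
    }
}

public class LatchHookSketch {
    public static void main(String[] args) throws InterruptedException {
        CountDownLatch removed = new CountDownLatch(1);

        // Override the callback so the test can await the async delivery,
        // exactly like the removeServerSegment override in the Druid test above.
        SegmentWatcher watcher = new SegmentWatcher() {
            @Override
            protected void onSegmentRemoved(String dataSource, String segmentId) {
                super.onSegmentRemoved(dataSource, segmentId);
                if ("serverSegmentRemoveTest".equals(dataSource)) {
                    removed.countDown();
                }
            }
        };

        watcher.removeSegment("serverSegmentRemoveTest", "seg-1");
        if (!removed.await(1, TimeUnit.SECONDS)) {
            throw new AssertionError("callback was never delivered");
        }
        watcher.shutdown();
    }
}

Calling super first preserves the production behavior; the latch only observes it.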

Example 28 with DruidServerMetadata

use of org.apache.druid.server.coordination.DruidServerMetadata in project druid by druid-io.

the class DruidSchemaTest method testSegmentAddedCallbackAddExistingSegment.

@Test
public void testSegmentAddedCallbackAddExistingSegment() throws InterruptedException {
    String datasource = "newSegmentAddTest";
    CountDownLatch addSegmentLatch = new CountDownLatch(2);
    DruidSchema schema = new DruidSchema(CalciteTests.createMockQueryLifecycleFactory(walker, conglomerate), serverView, segmentManager, new MapJoinableFactory(ImmutableSet.of(), ImmutableMap.of()), PLANNER_CONFIG_DEFAULT, new NoopEscalator(), new BrokerInternalQueryConfig(), null) {

        @Override
        protected void addSegment(final DruidServerMetadata server, final DataSegment segment) {
            super.addSegment(server, segment);
            if (datasource.equals(segment.getDataSource())) {
                addSegmentLatch.countDown();
            }
        }
    };
    DataSegment segment = newSegment(datasource, 1);
    serverView.addSegment(segment, ServerType.REALTIME);
    serverView.addSegment(segment, ServerType.HISTORICAL);
    Assert.assertTrue(addSegmentLatch.await(1, TimeUnit.SECONDS));
    Assert.assertEquals(5, schema.getTotalSegments());
    List<AvailableSegmentMetadata> metadatas = schema.getSegmentMetadataSnapshot().values().stream().filter(metadata -> datasource.equals(metadata.getSegment().getDataSource())).collect(Collectors.toList());
    Assert.assertEquals(1, metadatas.size());
    AvailableSegmentMetadata metadata = metadatas.get(0);
    // the realtime flag is unset once any historical server serves the segment
    Assert.assertEquals(0, metadata.isRealtime());
    Assert.assertEquals(0, metadata.getNumRows());
    Assert.assertEquals(2, metadata.getNumReplicas());
    Assert.assertTrue(schema.getSegmentsNeedingRefresh().contains(metadata.getSegment().getId()));
    Assert.assertFalse(schema.getMutableSegments().contains(metadata.getSegment().getId()));
}
Also used : Pair(org.apache.druid.java.util.common.Pair) AllColumnIncluderator(org.apache.druid.query.metadata.metadata.AllColumnIncluderator) LongSumAggregatorFactory(org.apache.druid.query.aggregation.LongSumAggregatorFactory) After(org.junit.After) Map(java.util.Map) ServerType(org.apache.druid.server.coordination.ServerType) OffHeapMemorySegmentWriteOutMediumFactory(org.apache.druid.segment.writeout.OffHeapMemorySegmentWriteOutMediumFactory) EnumSet(java.util.EnumSet) HyperUniquesAggregatorFactory(org.apache.druid.query.aggregation.hyperloglog.HyperUniquesAggregatorFactory) ImmutableSet(com.google.common.collect.ImmutableSet) ImmutableMap(com.google.common.collect.ImmutableMap) NumberedShardSpec(org.apache.druid.timeline.partition.NumberedShardSpec) QueryableIndex(org.apache.druid.segment.QueryableIndex) Table(org.apache.calcite.schema.Table) Set(java.util.Set) IndexBuilder(org.apache.druid.segment.IndexBuilder) JavaTypeFactoryImpl(org.apache.calcite.jdbc.JavaTypeFactoryImpl) MapJoinableFactory(org.apache.druid.segment.join.MapJoinableFactory) Collectors(java.util.stream.Collectors) Sets(com.google.common.collect.Sets) IncrementalIndexSchema(org.apache.druid.segment.incremental.IncrementalIndexSchema) CountDownLatch(java.util.concurrent.CountDownLatch) SegmentMetadataQuery(org.apache.druid.query.metadata.metadata.SegmentMetadataQuery) List(java.util.List) BrokerInternalQueryConfig(org.apache.druid.client.BrokerInternalQueryConfig) LinearShardSpec(org.apache.druid.timeline.partition.LinearShardSpec) RelDataTypeField(org.apache.calcite.rel.type.RelDataTypeField) DataSegment(org.apache.druid.timeline.DataSegment) SegmentId(org.apache.druid.timeline.SegmentId) QueryLifecycleFactory(org.apache.druid.server.QueryLifecycleFactory) DoubleSumAggregatorFactory(org.apache.druid.query.aggregation.DoubleSumAggregatorFactory) Intervals(org.apache.druid.java.util.common.Intervals) GlobalTableDataSource(org.apache.druid.query.GlobalTableDataSource) QueryLifecycle(org.apache.druid.server.QueryLifecycle) CalciteTests(org.apache.druid.sql.calcite.util.CalciteTests) MultipleSpecificSegmentSpec(org.apache.druid.query.spec.MultipleSpecificSegmentSpec) ImmutableList(com.google.common.collect.ImmutableList) DruidServerMetadata(org.apache.druid.server.coordination.DruidServerMetadata) TestServerInventoryView(org.apache.druid.sql.calcite.util.TestServerInventoryView) SegmentAnalysis(org.apache.druid.query.metadata.metadata.SegmentAnalysis) NoopEscalator(org.apache.druid.server.security.NoopEscalator) PruneSpecsHolder(org.apache.druid.timeline.DataSegment.PruneSpecsHolder) ImmutableDruidServer(org.apache.druid.client.ImmutableDruidServer) CountAggregatorFactory(org.apache.druid.query.aggregation.CountAggregatorFactory) Before(org.junit.Before) RelDataType(org.apache.calcite.rel.type.RelDataType) Access(org.apache.druid.server.security.Access) SqlTypeName(org.apache.calcite.sql.type.SqlTypeName) ObjectMapper(com.fasterxml.jackson.databind.ObjectMapper) Test(org.junit.Test) IOException(java.io.IOException) SpecificSegmentsQuerySegmentWalker(org.apache.druid.sql.calcite.util.SpecificSegmentsQuerySegmentWalker) EasyMock(org.easymock.EasyMock) AllowAllAuthenticator(org.apache.druid.server.security.AllowAllAuthenticator) TableDataSource(org.apache.druid.query.TableDataSource) File(java.io.File) ColumnAnalysis(org.apache.druid.query.metadata.metadata.ColumnAnalysis) TimeUnit(java.util.concurrent.TimeUnit) TestHelper(org.apache.druid.segment.TestHelper) DruidTable(org.apache.druid.sql.calcite.table.DruidTable) 
RowSignature(org.apache.druid.segment.column.RowSignature) ColumnType(org.apache.druid.segment.column.ColumnType) Assert(org.junit.Assert)
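
The assertions above (getNumReplicas() == 2, isRealtime() == 0) follow from per-segment bookkeeping: replicas are the distinct servers announcing a segment, and the realtime flag stays set only while every announcing server is a realtime one. The sketch below is a loose illustration of that rule under invented names (SegmentBook, ServerInfo); Druid's actual logic in DruidSchema#addSegment is more involved.

import java.util.Map;
import java.util.Set;
import java.util.concurrent.ConcurrentHashMap;

// Hypothetical bookkeeping in the spirit of DruidSchema#addSegment: replicas are
// the distinct servers announcing a segment, and the realtime flag drops to 0 as
// soon as any non-realtime server (e.g. a historical) serves it.
class SegmentBook {
    record ServerInfo(String name, boolean realtime) {}

    private final Map<String, Set<ServerInfo>> serversBySegment = new ConcurrentHashMap<>();
    private final Map<String, Integer> realtimeFlag = new ConcurrentHashMap<>();

    void addSegment(ServerInfo server, String segmentId) {
        serversBySegment.computeIfAbsent(segmentId, id -> ConcurrentHashMap.newKeySet()).add(server);
        // AND the flag across announcing servers: one non-realtime server clears it for good.
        realtimeFlag.merge(segmentId, server.realtime() ? 1 : 0, (old, now) -> old & now);
    }

    int numReplicas(String segmentId) {
        return serversBySegment.getOrDefault(segmentId, Set.of()).size();
    }

    int isRealtime(String segmentId) {
        return realtimeFlag.getOrDefault(segmentId, 0);
    }

    public static void main(String[] args) {
        SegmentBook book = new SegmentBook();
        book.addSegment(new ServerInfo("rt-1", true), "seg-1");    // realtime announces first
        book.addSegment(new ServerInfo("hist-1", false), "seg-1"); // then a historical
        System.out.println(book.numReplicas("seg-1")); // 2, one per distinct server
        System.out.println(book.isRealtime("seg-1"));  // 0, the historical clears the flag
    }
}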

Example 29 with DruidServerMetadata

use of org.apache.druid.server.coordination.DruidServerMetadata in project druid by druid-io.

the class DruidSchemaTest method testSegmentRemovedCallbackEmptyDataSourceAfterRemove.

@Test
public void testSegmentRemovedCallbackEmptyDataSourceAfterRemove() throws InterruptedException, IOException {
    String datasource = "segmentRemoveTest";
    CountDownLatch addSegmentLatch = new CountDownLatch(1);
    CountDownLatch removeSegmentLatch = new CountDownLatch(1);
    DruidSchema schema = new DruidSchema(CalciteTests.createMockQueryLifecycleFactory(walker, conglomerate), serverView, segmentManager, new MapJoinableFactory(ImmutableSet.of(), ImmutableMap.of()), PLANNER_CONFIG_DEFAULT, new NoopEscalator(), new BrokerInternalQueryConfig(), null) {

        @Override
        protected void addSegment(final DruidServerMetadata server, final DataSegment segment) {
            super.addSegment(server, segment);
            if (datasource.equals(segment.getDataSource())) {
                addSegmentLatch.countDown();
            }
        }

        @Override
        void removeSegment(final DataSegment segment) {
            super.removeSegment(segment);
            if (datasource.equals(segment.getDataSource())) {
                removeSegmentLatch.countDown();
            }
        }
    };
    DataSegment segment = newSegment(datasource, 1);
    serverView.addSegment(segment, ServerType.REALTIME);
    Assert.assertTrue(addSegmentLatch.await(1, TimeUnit.SECONDS));
    schema.refresh(Sets.newHashSet(segment.getId()), Sets.newHashSet(datasource));
    serverView.removeSegment(segment, ServerType.REALTIME);
    Assert.assertTrue(removeSegmentLatch.await(1, TimeUnit.SECONDS));
    Assert.assertEquals(4, schema.getTotalSegments());
    List<AvailableSegmentMetadata> metadatas = schema.getSegmentMetadataSnapshot().values().stream().filter(metadata -> datasource.equals(metadata.getSegment().getDataSource())).collect(Collectors.toList());
    Assert.assertEquals(0, metadatas.size());
    Assert.assertFalse(schema.getSegmentsNeedingRefresh().contains(segment.getId()));
    Assert.assertFalse(schema.getMutableSegments().contains(segment.getId()));
    Assert.assertFalse(schema.getDataSourcesNeedingRebuild().contains(datasource));
    Assert.assertFalse(schema.getTableNames().contains(datasource));
}
Also used : Pair(org.apache.druid.java.util.common.Pair) AllColumnIncluderator(org.apache.druid.query.metadata.metadata.AllColumnIncluderator) LongSumAggregatorFactory(org.apache.druid.query.aggregation.LongSumAggregatorFactory) After(org.junit.After) Map(java.util.Map) ServerType(org.apache.druid.server.coordination.ServerType) OffHeapMemorySegmentWriteOutMediumFactory(org.apache.druid.segment.writeout.OffHeapMemorySegmentWriteOutMediumFactory) EnumSet(java.util.EnumSet) HyperUniquesAggregatorFactory(org.apache.druid.query.aggregation.hyperloglog.HyperUniquesAggregatorFactory) ImmutableSet(com.google.common.collect.ImmutableSet) ImmutableMap(com.google.common.collect.ImmutableMap) NumberedShardSpec(org.apache.druid.timeline.partition.NumberedShardSpec) QueryableIndex(org.apache.druid.segment.QueryableIndex) Table(org.apache.calcite.schema.Table) Set(java.util.Set) IndexBuilder(org.apache.druid.segment.IndexBuilder) JavaTypeFactoryImpl(org.apache.calcite.jdbc.JavaTypeFactoryImpl) MapJoinableFactory(org.apache.druid.segment.join.MapJoinableFactory) Collectors(java.util.stream.Collectors) Sets(com.google.common.collect.Sets) IncrementalIndexSchema(org.apache.druid.segment.incremental.IncrementalIndexSchema) CountDownLatch(java.util.concurrent.CountDownLatch) SegmentMetadataQuery(org.apache.druid.query.metadata.metadata.SegmentMetadataQuery) List(java.util.List) BrokerInternalQueryConfig(org.apache.druid.client.BrokerInternalQueryConfig) LinearShardSpec(org.apache.druid.timeline.partition.LinearShardSpec) RelDataTypeField(org.apache.calcite.rel.type.RelDataTypeField) DataSegment(org.apache.druid.timeline.DataSegment) SegmentId(org.apache.druid.timeline.SegmentId) QueryLifecycleFactory(org.apache.druid.server.QueryLifecycleFactory) DoubleSumAggregatorFactory(org.apache.druid.query.aggregation.DoubleSumAggregatorFactory) Intervals(org.apache.druid.java.util.common.Intervals) GlobalTableDataSource(org.apache.druid.query.GlobalTableDataSource) QueryLifecycle(org.apache.druid.server.QueryLifecycle) CalciteTests(org.apache.druid.sql.calcite.util.CalciteTests) MultipleSpecificSegmentSpec(org.apache.druid.query.spec.MultipleSpecificSegmentSpec) ImmutableList(com.google.common.collect.ImmutableList) DruidServerMetadata(org.apache.druid.server.coordination.DruidServerMetadata) TestServerInventoryView(org.apache.druid.sql.calcite.util.TestServerInventoryView) SegmentAnalysis(org.apache.druid.query.metadata.metadata.SegmentAnalysis) NoopEscalator(org.apache.druid.server.security.NoopEscalator) PruneSpecsHolder(org.apache.druid.timeline.DataSegment.PruneSpecsHolder) ImmutableDruidServer(org.apache.druid.client.ImmutableDruidServer) CountAggregatorFactory(org.apache.druid.query.aggregation.CountAggregatorFactory) Before(org.junit.Before) RelDataType(org.apache.calcite.rel.type.RelDataType) Access(org.apache.druid.server.security.Access) SqlTypeName(org.apache.calcite.sql.type.SqlTypeName) ObjectMapper(com.fasterxml.jackson.databind.ObjectMapper) Test(org.junit.Test) IOException(java.io.IOException) SpecificSegmentsQuerySegmentWalker(org.apache.druid.sql.calcite.util.SpecificSegmentsQuerySegmentWalker) EasyMock(org.easymock.EasyMock) AllowAllAuthenticator(org.apache.druid.server.security.AllowAllAuthenticator) TableDataSource(org.apache.druid.query.TableDataSource) File(java.io.File) ColumnAnalysis(org.apache.druid.query.metadata.metadata.ColumnAnalysis) TimeUnit(java.util.concurrent.TimeUnit) TestHelper(org.apache.druid.segment.TestHelper) DruidTable(org.apache.druid.sql.calcite.table.DruidTable) 
RowSignature(org.apache.druid.segment.column.RowSignature) ColumnType(org.apache.druid.segment.column.ColumnType) Assert(org.junit.Assert)
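
The assertions at the end of this test pin down a cleanup invariant: removing the last segment of a datasource must purge every per-segment structure and drop the datasource's table. Below is a hedged sketch of that invariant under invented names (SchemaState is not Druid API); the computeIfPresent-returns-null trick is what removes the datasource entry atomically.

import java.util.Map;
import java.util.Set;
import java.util.concurrent.ConcurrentHashMap;

// Hypothetical cleanup logic illustrating the invariants asserted above: once the
// last segment of a datasource is removed, every per-segment structure forgets it
// and the datasource no longer appears as a table.
class SchemaState {
    private final Map<String, Set<String>> segmentsByDataSource = new ConcurrentHashMap<>();
    private final Set<String> segmentsNeedingRefresh = ConcurrentHashMap.newKeySet();
    private final Set<String> mutableSegments = ConcurrentHashMap.newKeySet();

    void addSegment(String dataSource, String segmentId, boolean realtime) {
        segmentsByDataSource.computeIfAbsent(dataSource, ds -> ConcurrentHashMap.newKeySet()).add(segmentId);
        segmentsNeedingRefresh.add(segmentId);
        if (realtime) {
            mutableSegments.add(segmentId);
        }
    }

    void removeSegment(String dataSource, String segmentId) {
        segmentsNeedingRefresh.remove(segmentId);
        mutableSegments.remove(segmentId);
        // Drop the datasource entry entirely when its last segment goes away.
        segmentsByDataSource.computeIfPresent(dataSource, (ds, segs) -> {
            segs.remove(segmentId);
            return segs.isEmpty() ? null : segs;
        });
    }

    Set<String> tableNames() {
        return segmentsByDataSource.keySet();
    }

    public static void main(String[] args) {
        SchemaState state = new SchemaState();
        state.addSegment("segmentRemoveTest", "seg-1", true);
        state.removeSegment("segmentRemoveTest", "seg-1");
        // Mirrors the assertions in the test: no leftover state for the datasource.
        System.out.println(state.tableNames().contains("segmentRemoveTest")); // false
        System.out.println(state.segmentsNeedingRefresh.contains("seg-1"));   // false
    }
}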

Example 30 with DruidServerMetadata

use of org.apache.druid.server.coordination.DruidServerMetadata in project druid by druid-io.

the class DruidSchemaTest method testAvailableSegmentMetadataNumRows.

/**
 * This tests that {@link AvailableSegmentMetadata#getNumRows()} is correct in the case
 * of multiple replicas, i.e. when {@link DruidSchema#addSegment(DruidServerMetadata, DataSegment)}
 * is called more than once for the same segment.
 */
@Test
public void testAvailableSegmentMetadataNumRows() {
    Map<SegmentId, AvailableSegmentMetadata> segmentsMetadata = schema.getSegmentMetadataSnapshot();
    final List<DataSegment> segments = segmentsMetadata.values().stream().map(AvailableSegmentMetadata::getSegment).collect(Collectors.toList());
    Assert.assertEquals(4, segments.size());
    // find the only segment with datasource "foo2"
    final DataSegment existingSegment = segments.stream().filter(segment -> segment.getDataSource().equals("foo2")).findFirst().orElse(null);
    Assert.assertNotNull(existingSegment);
    final AvailableSegmentMetadata existingMetadata = segmentsMetadata.get(existingSegment.getId());
    // update AvailableSegmentMetadata of existingSegment with numRows=5
    AvailableSegmentMetadata updatedMetadata = AvailableSegmentMetadata.from(existingMetadata).withNumRows(5).build();
    schema.setAvailableSegmentMetadata(existingSegment.getId(), updatedMetadata);
    // find a druidServer holding existingSegment
    final Pair<ImmutableDruidServer, DataSegment> pair = druidServers.stream().flatMap(druidServer -> druidServer.iterateAllSegments().stream().filter(segment -> segment.getId().equals(existingSegment.getId())).map(segment -> Pair.of(druidServer, segment))).findAny().orElse(null);
    Assert.assertNotNull(pair);
    final ImmutableDruidServer server = pair.lhs;
    Assert.assertNotNull(server);
    final DruidServerMetadata druidServerMetadata = server.getMetadata();
    // invoke DruidSchema#addSegment on existingSegment
    schema.addSegment(druidServerMetadata, existingSegment);
    segmentsMetadata = schema.getSegmentMetadataSnapshot();
    // get the only segment with datasource "foo2"
    final DataSegment currentSegment = segments.stream().filter(segment -> segment.getDataSource().equals("foo2")).findFirst().orElse(null);
    final AvailableSegmentMetadata currentMetadata = segmentsMetadata.get(currentSegment.getId());
    Assert.assertEquals(updatedMetadata.getSegment().getId(), currentMetadata.getSegment().getId());
    Assert.assertEquals(updatedMetadata.getNumRows(), currentMetadata.getNumRows());
    // numReplicas does not change here since addSegment is called with the same server that was already serving existingSegment
    Assert.assertEquals(updatedMetadata.getNumReplicas(), currentMetadata.getNumReplicas());
}
Also used : Pair(org.apache.druid.java.util.common.Pair) AllColumnIncluderator(org.apache.druid.query.metadata.metadata.AllColumnIncluderator) LongSumAggregatorFactory(org.apache.druid.query.aggregation.LongSumAggregatorFactory) After(org.junit.After) Map(java.util.Map) ServerType(org.apache.druid.server.coordination.ServerType) OffHeapMemorySegmentWriteOutMediumFactory(org.apache.druid.segment.writeout.OffHeapMemorySegmentWriteOutMediumFactory) EnumSet(java.util.EnumSet) HyperUniquesAggregatorFactory(org.apache.druid.query.aggregation.hyperloglog.HyperUniquesAggregatorFactory) ImmutableSet(com.google.common.collect.ImmutableSet) ImmutableMap(com.google.common.collect.ImmutableMap) NumberedShardSpec(org.apache.druid.timeline.partition.NumberedShardSpec) QueryableIndex(org.apache.druid.segment.QueryableIndex) Table(org.apache.calcite.schema.Table) Set(java.util.Set) IndexBuilder(org.apache.druid.segment.IndexBuilder) JavaTypeFactoryImpl(org.apache.calcite.jdbc.JavaTypeFactoryImpl) MapJoinableFactory(org.apache.druid.segment.join.MapJoinableFactory) Collectors(java.util.stream.Collectors) Sets(com.google.common.collect.Sets) IncrementalIndexSchema(org.apache.druid.segment.incremental.IncrementalIndexSchema) CountDownLatch(java.util.concurrent.CountDownLatch) SegmentMetadataQuery(org.apache.druid.query.metadata.metadata.SegmentMetadataQuery) List(java.util.List) BrokerInternalQueryConfig(org.apache.druid.client.BrokerInternalQueryConfig) LinearShardSpec(org.apache.druid.timeline.partition.LinearShardSpec) RelDataTypeField(org.apache.calcite.rel.type.RelDataTypeField) DataSegment(org.apache.druid.timeline.DataSegment) SegmentId(org.apache.druid.timeline.SegmentId) QueryLifecycleFactory(org.apache.druid.server.QueryLifecycleFactory) DoubleSumAggregatorFactory(org.apache.druid.query.aggregation.DoubleSumAggregatorFactory) Intervals(org.apache.druid.java.util.common.Intervals) GlobalTableDataSource(org.apache.druid.query.GlobalTableDataSource) QueryLifecycle(org.apache.druid.server.QueryLifecycle) CalciteTests(org.apache.druid.sql.calcite.util.CalciteTests) MultipleSpecificSegmentSpec(org.apache.druid.query.spec.MultipleSpecificSegmentSpec) ImmutableList(com.google.common.collect.ImmutableList) DruidServerMetadata(org.apache.druid.server.coordination.DruidServerMetadata) TestServerInventoryView(org.apache.druid.sql.calcite.util.TestServerInventoryView) SegmentAnalysis(org.apache.druid.query.metadata.metadata.SegmentAnalysis) NoopEscalator(org.apache.druid.server.security.NoopEscalator) PruneSpecsHolder(org.apache.druid.timeline.DataSegment.PruneSpecsHolder) ImmutableDruidServer(org.apache.druid.client.ImmutableDruidServer) CountAggregatorFactory(org.apache.druid.query.aggregation.CountAggregatorFactory) Before(org.junit.Before) RelDataType(org.apache.calcite.rel.type.RelDataType) Access(org.apache.druid.server.security.Access) SqlTypeName(org.apache.calcite.sql.type.SqlTypeName) ObjectMapper(com.fasterxml.jackson.databind.ObjectMapper) Test(org.junit.Test) IOException(java.io.IOException) SpecificSegmentsQuerySegmentWalker(org.apache.druid.sql.calcite.util.SpecificSegmentsQuerySegmentWalker) EasyMock(org.easymock.EasyMock) AllowAllAuthenticator(org.apache.druid.server.security.AllowAllAuthenticator) TableDataSource(org.apache.druid.query.TableDataSource) File(java.io.File) ColumnAnalysis(org.apache.druid.query.metadata.metadata.ColumnAnalysis) TimeUnit(java.util.concurrent.TimeUnit) TestHelper(org.apache.druid.segment.TestHelper) DruidTable(org.apache.druid.sql.calcite.table.DruidTable) 
RowSignature(org.apache.druid.segment.column.RowSignature) ColumnType(org.apache.druid.segment.column.ColumnType) Assert(org.junit.Assert)
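
AvailableSegmentMetadata.from(existingMetadata).withNumRows(5).build() in this test is a copy-builder update of an immutable value: copy every field, override one, build a new instance. Below is a generic sketch of the pattern; SegmentMeta is an invented class, not Druid's implementation.

// Hypothetical immutable-metadata holder showing the from(...).withNumRows(...).build()
// copy-builder style used by AvailableSegmentMetadata above.
final class SegmentMeta {
    private final String segmentId;
    private final long numRows;
    private final long numReplicas;

    private SegmentMeta(String segmentId, long numRows, long numReplicas) {
        this.segmentId = segmentId;
        this.numRows = numRows;
        this.numReplicas = numReplicas;
    }

    long getNumRows() {
        return numRows;
    }

    // Seed a builder with every field of the existing instance.
    static Builder from(SegmentMeta existing) {
        return new Builder(existing.segmentId, existing.numRows, existing.numReplicas);
    }

    static final class Builder {
        private final String segmentId;
        private long numRows;
        private long numReplicas;

        private Builder(String segmentId, long numRows, long numReplicas) {
            this.segmentId = segmentId;
            this.numRows = numRows;
            this.numReplicas = numReplicas;
        }

        Builder withNumRows(long numRows) {
            this.numRows = numRows;
            return this;
        }

        SegmentMeta build() {
            return new SegmentMeta(segmentId, numRows, numReplicas);
        }
    }

    public static void main(String[] args) {
        SegmentMeta existing = new SegmentMeta("foo2-seg", 0, 1);
        // Copy all fields, override only numRows; the original stays untouched.
        SegmentMeta updated = SegmentMeta.from(existing).withNumRows(5).build();
        System.out.println(existing.getNumRows() + " -> " + updated.getNumRows()); // 0 -> 5
    }
}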

Aggregations

DruidServerMetadata (org.apache.druid.server.coordination.DruidServerMetadata) 37
DataSegment (org.apache.druid.timeline.DataSegment) 28
Test (org.junit.Test) 25
CountDownLatch (java.util.concurrent.CountDownLatch) 16
ImmutableDruidServer (org.apache.druid.client.ImmutableDruidServer) 16
List (java.util.List) 14
BrokerInternalQueryConfig (org.apache.druid.client.BrokerInternalQueryConfig) 13
MapJoinableFactory (org.apache.druid.segment.join.MapJoinableFactory) 13
NoopEscalator (org.apache.druid.server.security.NoopEscalator) 13
SegmentId (org.apache.druid.timeline.SegmentId) 13
Pair (org.apache.druid.java.util.common.Pair) 12
ImmutableList (com.google.common.collect.ImmutableList) 11
ImmutableMap (com.google.common.collect.ImmutableMap) 11
Collectors (java.util.stream.Collectors) 11
Intervals (org.apache.druid.java.util.common.Intervals) 11
Before (org.junit.Before) 11
ObjectMapper (com.fasterxml.jackson.databind.ObjectMapper) 10
Map (java.util.Map) 10
TableDataSource (org.apache.druid.query.TableDataSource) 10
ImmutableSet (com.google.common.collect.ImmutableSet) 9