Search in sources :

Example 6 with BrokerInternalQueryConfig

Use of org.apache.druid.client.BrokerInternalQueryConfig in project druid by druid-io.

Source: class DruidSchemaTest, method testSegmentAddedCallbackAddNewRealtimeSegment.

/**
 * Verifies that adding a brand-new REALTIME segment fires the addSegment callback
 * and registers the segment as both mutable and needing a metadata refresh.
 */
@Test
public void testSegmentAddedCallbackAddNewRealtimeSegment() throws InterruptedException {
    final String dataSourceName = "newSegmentAddTest";
    final CountDownLatch segmentAdded = new CountDownLatch(1);
    final DruidSchema schema = new DruidSchema(
        CalciteTests.createMockQueryLifecycleFactory(walker, conglomerate),
        serverView,
        segmentManager,
        new MapJoinableFactory(ImmutableSet.of(), ImmutableMap.of()),
        PLANNER_CONFIG_DEFAULT,
        new NoopEscalator(),
        new BrokerInternalQueryConfig(),
        null
    ) {
        @Override
        protected void addSegment(final DruidServerMetadata server, final DataSegment segment) {
            super.addSegment(server, segment);
            // Release the latch only for the segment this test adds.
            if (dataSourceName.equals(segment.getDataSource())) {
                segmentAdded.countDown();
            }
        }
    };
    serverView.addSegment(newSegment(dataSourceName, 1), ServerType.REALTIME);
    Assert.assertTrue(segmentAdded.await(1, TimeUnit.SECONDS));
    // setUp registers four segments; the one added above makes five.
    Assert.assertEquals(5, schema.getTotalSegments());
    final List<AvailableSegmentMetadata> matching = schema.getSegmentMetadataSnapshot()
        .values()
        .stream()
        .filter(metadata -> dataSourceName.equals(metadata.getSegment().getDataSource()))
        .collect(Collectors.toList());
    Assert.assertEquals(1, matching.size());
    final AvailableSegmentMetadata metadata = matching.get(0);
    Assert.assertEquals(1, metadata.isRealtime());
    Assert.assertEquals(0, metadata.getNumRows());
    // A realtime segment is mutable and must be queued for refresh.
    Assert.assertTrue(schema.getSegmentsNeedingRefresh().contains(metadata.getSegment().getId()));
    Assert.assertTrue(schema.getMutableSegments().contains(metadata.getSegment().getId()));
}
Also used : Pair(org.apache.druid.java.util.common.Pair) AllColumnIncluderator(org.apache.druid.query.metadata.metadata.AllColumnIncluderator) LongSumAggregatorFactory(org.apache.druid.query.aggregation.LongSumAggregatorFactory) After(org.junit.After) Map(java.util.Map) ServerType(org.apache.druid.server.coordination.ServerType) OffHeapMemorySegmentWriteOutMediumFactory(org.apache.druid.segment.writeout.OffHeapMemorySegmentWriteOutMediumFactory) EnumSet(java.util.EnumSet) HyperUniquesAggregatorFactory(org.apache.druid.query.aggregation.hyperloglog.HyperUniquesAggregatorFactory) ImmutableSet(com.google.common.collect.ImmutableSet) ImmutableMap(com.google.common.collect.ImmutableMap) NumberedShardSpec(org.apache.druid.timeline.partition.NumberedShardSpec) QueryableIndex(org.apache.druid.segment.QueryableIndex) Table(org.apache.calcite.schema.Table) Set(java.util.Set) IndexBuilder(org.apache.druid.segment.IndexBuilder) JavaTypeFactoryImpl(org.apache.calcite.jdbc.JavaTypeFactoryImpl) MapJoinableFactory(org.apache.druid.segment.join.MapJoinableFactory) Collectors(java.util.stream.Collectors) Sets(com.google.common.collect.Sets) IncrementalIndexSchema(org.apache.druid.segment.incremental.IncrementalIndexSchema) CountDownLatch(java.util.concurrent.CountDownLatch) SegmentMetadataQuery(org.apache.druid.query.metadata.metadata.SegmentMetadataQuery) List(java.util.List) BrokerInternalQueryConfig(org.apache.druid.client.BrokerInternalQueryConfig) LinearShardSpec(org.apache.druid.timeline.partition.LinearShardSpec) RelDataTypeField(org.apache.calcite.rel.type.RelDataTypeField) DataSegment(org.apache.druid.timeline.DataSegment) SegmentId(org.apache.druid.timeline.SegmentId) QueryLifecycleFactory(org.apache.druid.server.QueryLifecycleFactory) DoubleSumAggregatorFactory(org.apache.druid.query.aggregation.DoubleSumAggregatorFactory) Intervals(org.apache.druid.java.util.common.Intervals) GlobalTableDataSource(org.apache.druid.query.GlobalTableDataSource) 
QueryLifecycle(org.apache.druid.server.QueryLifecycle) CalciteTests(org.apache.druid.sql.calcite.util.CalciteTests) MultipleSpecificSegmentSpec(org.apache.druid.query.spec.MultipleSpecificSegmentSpec) ImmutableList(com.google.common.collect.ImmutableList) DruidServerMetadata(org.apache.druid.server.coordination.DruidServerMetadata) TestServerInventoryView(org.apache.druid.sql.calcite.util.TestServerInventoryView) SegmentAnalysis(org.apache.druid.query.metadata.metadata.SegmentAnalysis) NoopEscalator(org.apache.druid.server.security.NoopEscalator) PruneSpecsHolder(org.apache.druid.timeline.DataSegment.PruneSpecsHolder) ImmutableDruidServer(org.apache.druid.client.ImmutableDruidServer) CountAggregatorFactory(org.apache.druid.query.aggregation.CountAggregatorFactory) Before(org.junit.Before) RelDataType(org.apache.calcite.rel.type.RelDataType) Access(org.apache.druid.server.security.Access) SqlTypeName(org.apache.calcite.sql.type.SqlTypeName) ObjectMapper(com.fasterxml.jackson.databind.ObjectMapper) Test(org.junit.Test) IOException(java.io.IOException) SpecificSegmentsQuerySegmentWalker(org.apache.druid.sql.calcite.util.SpecificSegmentsQuerySegmentWalker) EasyMock(org.easymock.EasyMock) AllowAllAuthenticator(org.apache.druid.server.security.AllowAllAuthenticator) TableDataSource(org.apache.druid.query.TableDataSource) File(java.io.File) ColumnAnalysis(org.apache.druid.query.metadata.metadata.ColumnAnalysis) TimeUnit(java.util.concurrent.TimeUnit) TestHelper(org.apache.druid.segment.TestHelper) DruidTable(org.apache.druid.sql.calcite.table.DruidTable) RowSignature(org.apache.druid.segment.column.RowSignature) ColumnType(org.apache.druid.segment.column.ColumnType) Assert(org.junit.Assert) NoopEscalator(org.apache.druid.server.security.NoopEscalator) BrokerInternalQueryConfig(org.apache.druid.client.BrokerInternalQueryConfig) DruidServerMetadata(org.apache.druid.server.coordination.DruidServerMetadata) CountDownLatch(java.util.concurrent.CountDownLatch) 
MapJoinableFactory(org.apache.druid.segment.join.MapJoinableFactory) DataSegment(org.apache.druid.timeline.DataSegment) Test(org.junit.Test)

Example 7 with BrokerInternalQueryConfig

Use of org.apache.druid.client.BrokerInternalQueryConfig in project druid by druid-io.

Source: class DruidSchemaTest, method testSegmentAddedCallbackAddNewHistoricalSegment.

/**
 * Verifies that adding a brand-new HISTORICAL segment fires the addSegment callback
 * and queues the segment for a metadata refresh (but does not mark it mutable).
 */
@Test
public void testSegmentAddedCallbackAddNewHistoricalSegment() throws InterruptedException {
    final String dataSourceName = "newSegmentAddTest";
    final CountDownLatch segmentAdded = new CountDownLatch(1);
    final DruidSchema schema = new DruidSchema(
        CalciteTests.createMockQueryLifecycleFactory(walker, conglomerate),
        serverView,
        segmentManager,
        new MapJoinableFactory(ImmutableSet.of(), ImmutableMap.of()),
        PLANNER_CONFIG_DEFAULT,
        new NoopEscalator(),
        new BrokerInternalQueryConfig(),
        null
    ) {
        @Override
        protected void addSegment(final DruidServerMetadata server, final DataSegment segment) {
            super.addSegment(server, segment);
            // Release the latch only for the segment this test adds.
            if (dataSourceName.equals(segment.getDataSource())) {
                segmentAdded.countDown();
            }
        }
    };
    serverView.addSegment(newSegment(dataSourceName, 1), ServerType.HISTORICAL);
    Assert.assertTrue(segmentAdded.await(1, TimeUnit.SECONDS));
    // setUp registers four segments; the one added above makes five.
    Assert.assertEquals(5, schema.getTotalSegments());
    final List<AvailableSegmentMetadata> matching = schema.getSegmentMetadataSnapshot()
        .values()
        .stream()
        .filter(metadata -> dataSourceName.equals(metadata.getSegment().getDataSource()))
        .collect(Collectors.toList());
    Assert.assertEquals(1, matching.size());
    final AvailableSegmentMetadata metadata = matching.get(0);
    // Historical segments are not realtime and start with zero known rows.
    Assert.assertEquals(0, metadata.isRealtime());
    Assert.assertEquals(0, metadata.getNumRows());
    Assert.assertTrue(schema.getSegmentsNeedingRefresh().contains(metadata.getSegment().getId()));
}
Also used : Pair(org.apache.druid.java.util.common.Pair) AllColumnIncluderator(org.apache.druid.query.metadata.metadata.AllColumnIncluderator) LongSumAggregatorFactory(org.apache.druid.query.aggregation.LongSumAggregatorFactory) After(org.junit.After) Map(java.util.Map) ServerType(org.apache.druid.server.coordination.ServerType) OffHeapMemorySegmentWriteOutMediumFactory(org.apache.druid.segment.writeout.OffHeapMemorySegmentWriteOutMediumFactory) EnumSet(java.util.EnumSet) HyperUniquesAggregatorFactory(org.apache.druid.query.aggregation.hyperloglog.HyperUniquesAggregatorFactory) ImmutableSet(com.google.common.collect.ImmutableSet) ImmutableMap(com.google.common.collect.ImmutableMap) NumberedShardSpec(org.apache.druid.timeline.partition.NumberedShardSpec) QueryableIndex(org.apache.druid.segment.QueryableIndex) Table(org.apache.calcite.schema.Table) Set(java.util.Set) IndexBuilder(org.apache.druid.segment.IndexBuilder) JavaTypeFactoryImpl(org.apache.calcite.jdbc.JavaTypeFactoryImpl) MapJoinableFactory(org.apache.druid.segment.join.MapJoinableFactory) Collectors(java.util.stream.Collectors) Sets(com.google.common.collect.Sets) IncrementalIndexSchema(org.apache.druid.segment.incremental.IncrementalIndexSchema) CountDownLatch(java.util.concurrent.CountDownLatch) SegmentMetadataQuery(org.apache.druid.query.metadata.metadata.SegmentMetadataQuery) List(java.util.List) BrokerInternalQueryConfig(org.apache.druid.client.BrokerInternalQueryConfig) LinearShardSpec(org.apache.druid.timeline.partition.LinearShardSpec) RelDataTypeField(org.apache.calcite.rel.type.RelDataTypeField) DataSegment(org.apache.druid.timeline.DataSegment) SegmentId(org.apache.druid.timeline.SegmentId) QueryLifecycleFactory(org.apache.druid.server.QueryLifecycleFactory) DoubleSumAggregatorFactory(org.apache.druid.query.aggregation.DoubleSumAggregatorFactory) Intervals(org.apache.druid.java.util.common.Intervals) GlobalTableDataSource(org.apache.druid.query.GlobalTableDataSource) 
QueryLifecycle(org.apache.druid.server.QueryLifecycle) CalciteTests(org.apache.druid.sql.calcite.util.CalciteTests) MultipleSpecificSegmentSpec(org.apache.druid.query.spec.MultipleSpecificSegmentSpec) ImmutableList(com.google.common.collect.ImmutableList) DruidServerMetadata(org.apache.druid.server.coordination.DruidServerMetadata) TestServerInventoryView(org.apache.druid.sql.calcite.util.TestServerInventoryView) SegmentAnalysis(org.apache.druid.query.metadata.metadata.SegmentAnalysis) NoopEscalator(org.apache.druid.server.security.NoopEscalator) PruneSpecsHolder(org.apache.druid.timeline.DataSegment.PruneSpecsHolder) ImmutableDruidServer(org.apache.druid.client.ImmutableDruidServer) CountAggregatorFactory(org.apache.druid.query.aggregation.CountAggregatorFactory) Before(org.junit.Before) RelDataType(org.apache.calcite.rel.type.RelDataType) Access(org.apache.druid.server.security.Access) SqlTypeName(org.apache.calcite.sql.type.SqlTypeName) ObjectMapper(com.fasterxml.jackson.databind.ObjectMapper) Test(org.junit.Test) IOException(java.io.IOException) SpecificSegmentsQuerySegmentWalker(org.apache.druid.sql.calcite.util.SpecificSegmentsQuerySegmentWalker) EasyMock(org.easymock.EasyMock) AllowAllAuthenticator(org.apache.druid.server.security.AllowAllAuthenticator) TableDataSource(org.apache.druid.query.TableDataSource) File(java.io.File) ColumnAnalysis(org.apache.druid.query.metadata.metadata.ColumnAnalysis) TimeUnit(java.util.concurrent.TimeUnit) TestHelper(org.apache.druid.segment.TestHelper) DruidTable(org.apache.druid.sql.calcite.table.DruidTable) RowSignature(org.apache.druid.segment.column.RowSignature) ColumnType(org.apache.druid.segment.column.ColumnType) Assert(org.junit.Assert) NoopEscalator(org.apache.druid.server.security.NoopEscalator) BrokerInternalQueryConfig(org.apache.druid.client.BrokerInternalQueryConfig) DruidServerMetadata(org.apache.druid.server.coordination.DruidServerMetadata) CountDownLatch(java.util.concurrent.CountDownLatch) 
MapJoinableFactory(org.apache.druid.segment.join.MapJoinableFactory) DataSegment(org.apache.druid.timeline.DataSegment) Test(org.junit.Test)

Example 8 with BrokerInternalQueryConfig

Use of org.apache.druid.client.BrokerInternalQueryConfig in project druid by druid-io.

Source: class DruidSchemaTest, method setUp.

// Builds the shared test fixtures: two mmapped indexes exposed through a
// SpecificSegmentsQuerySegmentWalker, one realtime segment, a test inventory
// view over both, and two DruidSchema instances. `schema2` simulates a
// transient refresh failure; only `schema` is started and initialized here.
@Before
public void setUp() throws Exception {
    final File tmpDir = temporaryFolder.newFolder();
    // Index 1: rollup disabled, with count / doubleSum / hyperUnique metrics, built from ROWS1.
    final QueryableIndex index1 = IndexBuilder.create().tmpDir(new File(tmpDir, "1")).segmentWriteOutMediumFactory(OffHeapMemorySegmentWriteOutMediumFactory.instance()).schema(new IncrementalIndexSchema.Builder().withMetrics(new CountAggregatorFactory("cnt"), new DoubleSumAggregatorFactory("m1", "m1"), new HyperUniquesAggregatorFactory("unique_dim1", "dim1")).withRollup(false).build()).rows(ROWS1).buildMMappedIndex();
    // Index 2: rollup disabled, single longSum metric, built from ROWS2.
    final QueryableIndex index2 = IndexBuilder.create().tmpDir(new File(tmpDir, "2")).segmentWriteOutMediumFactory(OffHeapMemorySegmentWriteOutMediumFactory.instance()).schema(new IncrementalIndexSchema.Builder().withMetrics(new LongSumAggregatorFactory("m1", "m1")).withRollup(false).build()).rows(ROWS2).buildMMappedIndex();
    // Three served segments: DATASOURCE1 gets index1 (year 2000) and index2 (year 2001);
    // DATASOURCE2 reuses index2 over its own data interval.
    walker = new SpecificSegmentsQuerySegmentWalker(conglomerate).add(DataSegment.builder().dataSource(CalciteTests.DATASOURCE1).interval(Intervals.of("2000/P1Y")).version("1").shardSpec(new LinearShardSpec(0)).size(0).build(), index1).add(DataSegment.builder().dataSource(CalciteTests.DATASOURCE1).interval(Intervals.of("2001/P1Y")).version("1").shardSpec(new LinearShardSpec(0)).size(0).build(), index2).add(DataSegment.builder().dataSource(CalciteTests.DATASOURCE2).interval(index2.getDataInterval()).version("1").shardSpec(new LinearShardSpec(0)).size(0).build(), index2);
    // One extra segment ("foo3") served only as a realtime segment by the inventory view.
    final DataSegment segment1 = new DataSegment("foo3", Intervals.of("2012/2013"), "version3", null, ImmutableList.of("dim1", "dim2"), ImmutableList.of("met1", "met2"), new NumberedShardSpec(2, 3), null, 1, 100L, PruneSpecsHolder.DEFAULT);
    final List<DataSegment> realtimeSegments = ImmutableList.of(segment1);
    serverView = new TestServerInventoryView(walker.getSegments(), realtimeSegments);
    druidServers = serverView.getDruidServers();
    // Primary schema under test: counts down latches whenever a table is built
    // or a datasource is marked for rebuild, so tests can await those events.
    schema = new DruidSchema(CalciteTests.createMockQueryLifecycleFactory(walker, conglomerate), serverView, segmentManager, new MapJoinableFactory(ImmutableSet.of(globalTableJoinable), ImmutableMap.of(globalTableJoinable.getClass(), GlobalTableDataSource.class)), PLANNER_CONFIG_DEFAULT, new NoopEscalator(), new BrokerInternalQueryConfig(), null) {

        @Override
        protected DruidTable buildDruidTable(String dataSource) {
            DruidTable table = super.buildDruidTable(dataSource);
            buildTableLatch.countDown();
            return table;
        }

        @Override
        void markDataSourceAsNeedRebuild(String datasource) {
            super.markDataSourceAsNeedRebuild(datasource);
            markDataSourceLatch.countDown();
        }
    };
    // Secondary schema: identical to `schema` except the FIRST refreshSegments call
    // throws, letting tests exercise recovery from a failed metadata query.
    schema2 = new DruidSchema(CalciteTests.createMockQueryLifecycleFactory(walker, conglomerate), serverView, segmentManager, new MapJoinableFactory(ImmutableSet.of(globalTableJoinable), ImmutableMap.of(globalTableJoinable.getClass(), GlobalTableDataSource.class)), PLANNER_CONFIG_DEFAULT, new NoopEscalator(), new BrokerInternalQueryConfig(), null) {

        // Set to false after the first (deliberately failing) refresh attempt.
        boolean throwException = true;

        @Override
        protected DruidTable buildDruidTable(String dataSource) {
            DruidTable table = super.buildDruidTable(dataSource);
            buildTableLatch.countDown();
            return table;
        }

        @Override
        protected Set<SegmentId> refreshSegments(final Set<SegmentId> segments) throws IOException {
            if (throwException) {
                throwException = false;
                // Mimics a broker-side segment-metadata query timeout.
                throw new RuntimeException("Query[xxxx] url[http://xxxx:8083/druid/v2/] timed out.");
            } else {
                return super.refreshSegments(segments);
            }
        }

        @Override
        void markDataSourceAsNeedRebuild(String datasource) {
            super.markDataSourceAsNeedRebuild(datasource);
            markDataSourceLatch.countDown();
        }
    };
    // Only the primary schema is started; schema2 is driven manually by tests.
    schema.start();
    schema.awaitInitialization();
}
Also used : EnumSet(java.util.EnumSet) ImmutableSet(com.google.common.collect.ImmutableSet) Set(java.util.Set) TestServerInventoryView(org.apache.druid.sql.calcite.util.TestServerInventoryView) LinearShardSpec(org.apache.druid.timeline.partition.LinearShardSpec) IndexBuilder(org.apache.druid.segment.IndexBuilder) LongSumAggregatorFactory(org.apache.druid.query.aggregation.LongSumAggregatorFactory) GlobalTableDataSource(org.apache.druid.query.GlobalTableDataSource) DruidTable(org.apache.druid.sql.calcite.table.DruidTable) DataSegment(org.apache.druid.timeline.DataSegment) NoopEscalator(org.apache.druid.server.security.NoopEscalator) MapJoinableFactory(org.apache.druid.segment.join.MapJoinableFactory) NumberedShardSpec(org.apache.druid.timeline.partition.NumberedShardSpec) DoubleSumAggregatorFactory(org.apache.druid.query.aggregation.DoubleSumAggregatorFactory) SegmentId(org.apache.druid.timeline.SegmentId) IOException(java.io.IOException) CountAggregatorFactory(org.apache.druid.query.aggregation.CountAggregatorFactory) SpecificSegmentsQuerySegmentWalker(org.apache.druid.sql.calcite.util.SpecificSegmentsQuerySegmentWalker) QueryableIndex(org.apache.druid.segment.QueryableIndex) BrokerInternalQueryConfig(org.apache.druid.client.BrokerInternalQueryConfig) HyperUniquesAggregatorFactory(org.apache.druid.query.aggregation.hyperloglog.HyperUniquesAggregatorFactory) File(java.io.File) Before(org.junit.Before)

Example 9 with BrokerInternalQueryConfig

Use of org.apache.druid.client.BrokerInternalQueryConfig in project druid by druid-io.

Source: class CalciteTests, method createMockSchema.

/**
 * Builds a DruidSchema backed by mock dependencies, synchronously initializes it,
 * and returns it already stopped — tests get a fully-populated but inactive schema.
 *
 * @throws RuntimeException wrapping any InterruptedException raised while awaiting
 *                          initialization (the interrupt flag is restored first)
 */
private static DruidSchema createMockSchema(final QueryRunnerFactoryConglomerate conglomerate, final SpecificSegmentsQuerySegmentWalker walker, final PlannerConfig plannerConfig, final DruidSchemaManager druidSchemaManager) {
    final DruidSchema schema = new DruidSchema(CalciteTests.createMockQueryLifecycleFactory(walker, conglomerate), new TestServerInventoryView(walker.getSegments()), new SegmentManager(EasyMock.createMock(SegmentLoader.class)) {

        // Report the broadcast datasource as locally loaded so join code paths see it.
        @Override
        public Set<String> getDataSourceNames() {
            return ImmutableSet.of(BROADCAST_DATASOURCE);
        }
    }, createDefaultJoinableFactory(), plannerConfig, TEST_AUTHENTICATOR_ESCALATOR, new BrokerInternalQueryConfig(), druidSchemaManager);
    try {
        schema.start();
        schema.awaitInitialization();
    } catch (InterruptedException e) {
        // Restore the interrupt flag so callers can still observe the interruption.
        Thread.currentThread().interrupt();
        throw new RuntimeException(e);
    }
    // Stop background refreshing; the snapshot built above remains usable by tests.
    schema.stop();
    return schema;
}
Also used : Set(java.util.Set) ImmutableSet(com.google.common.collect.ImmutableSet) HashSet(java.util.HashSet) SegmentManager(org.apache.druid.server.SegmentManager) BrokerInternalQueryConfig(org.apache.druid.client.BrokerInternalQueryConfig) DruidSchema(org.apache.druid.sql.calcite.schema.DruidSchema) NamedDruidSchema(org.apache.druid.sql.calcite.schema.NamedDruidSchema)

Example 10 with BrokerInternalQueryConfig

Use of org.apache.druid.client.BrokerInternalQueryConfig in project druid by druid-io.

Source: class DruidSchemaConcurrencyTest, method testDruidSchemaRefreshAndInventoryViewAddSegmentAndBrokerServerViewGetTimeline.

/**
 * This tests the contention between 3 components, DruidSchema, InventoryView, and BrokerServerView.
 * It first triggers refreshing DruidSchema. To mimic some heavy work done with {@link DruidSchema#lock},
 * {@link DruidSchema#buildDruidTable} is overridden to sleep before doing real work. While refreshing DruidSchema,
 * more new segments are added to InventoryView, which triggers updates of BrokerServerView. Finally, while
 * BrokerServerView is updated, {@link BrokerServerView#getTimeline} is continuously called to mimic user query
 * processing. All these calls must return without heavy contention.
 */
@Test(timeout = 30000L)
public void testDruidSchemaRefreshAndInventoryViewAddSegmentAndBrokerServerViewGetTimeline() throws InterruptedException, ExecutionException, TimeoutException {
    schema = new DruidSchema(CalciteTests.createMockQueryLifecycleFactory(walker, conglomerate), serverView, segmentManager, new MapJoinableFactory(ImmutableSet.of(), ImmutableMap.of()), PLANNER_CONFIG_DEFAULT, new NoopEscalator(), new BrokerInternalQueryConfig(), null) {

        @Override
        DruidTable buildDruidTable(final String dataSource) {
            doInLock(() -> {
                try {
                    // Mimic some heavy work done in lock in DruidSchema
                    Thread.sleep(5000);
                } catch (InterruptedException e) {
                    // Restore the interrupt flag before propagating so the refreshing
                    // thread keeps its interrupted status.
                    Thread.currentThread().interrupt();
                    throw new RuntimeException(e);
                }
            });
            return super.buildDruidTable(dataSource);
        }
    };
    int numExistingSegments = 100;
    int numServers = 19;
    CountDownLatch segmentLoadLatch = new CountDownLatch(numExistingSegments);
    serverView.registerTimelineCallback(Execs.directExecutor(), new TimelineCallback() {

        @Override
        public CallbackAction timelineInitialized() {
            return CallbackAction.CONTINUE;
        }

        @Override
        public CallbackAction segmentAdded(DruidServerMetadata server, DataSegment segment) {
            segmentLoadLatch.countDown();
            return CallbackAction.CONTINUE;
        }

        @Override
        public CallbackAction segmentRemoved(DataSegment segment) {
            return CallbackAction.CONTINUE;
        }

        @Override
        public CallbackAction serverSegmentRemoved(DruidServerMetadata server, DataSegment segment) {
            return CallbackAction.CONTINUE;
        }
    });
    addSegmentsToCluster(0, numServers, numExistingSegments);
    // Wait for all segments to be loaded in BrokerServerView
    Assert.assertTrue(segmentLoadLatch.await(5, TimeUnit.SECONDS));
    // Trigger refresh of DruidSchema. This will internally run the heavy work mimicked by the overridden buildDruidTable.
    // Future<?> instead of raw Future: the task produces no meaningful value.
    Future<?> refreshFuture = exec.submit(() -> {
        schema.refresh(walker.getSegments().stream().map(DataSegment::getId).collect(Collectors.toSet()), Sets.newHashSet(DATASOURCE));
        return null;
    });
    // Trigger updates of BrokerServerView. This should be done asynchronously.
    // add completely new segments
    addSegmentsToCluster(numExistingSegments, numServers, 50);
    // add replicas of the first 30 segments.
    addReplicasToCluster(1, numServers, 30);
    // for the first 30 segments, we will still have replicas.
    // for the other 20 segments, they will be completely removed from the cluster.
    removeSegmentsFromCluster(numServers, 50);
    Assert.assertFalse(refreshFuture.isDone());
    for (int i = 0; i < 1000; i++) {
        boolean hasTimeline = exec.submit(() -> serverView.getTimeline(DataSourceAnalysis.forDataSource(new TableDataSource(DATASOURCE))).isPresent()).get(100, TimeUnit.MILLISECONDS);
        Assert.assertTrue(hasTimeline);
        // We want to call getTimeline while BrokerServerView is being updated. Sleep might help with timing.
        Thread.sleep(2);
    }
    refreshFuture.get(10, TimeUnit.SECONDS);
}
Also used : DruidTable(org.apache.druid.sql.calcite.table.DruidTable) DruidServerMetadata(org.apache.druid.server.coordination.DruidServerMetadata) CountDownLatch(java.util.concurrent.CountDownLatch) DataSegment(org.apache.druid.timeline.DataSegment) NoopEscalator(org.apache.druid.server.security.NoopEscalator) TableDataSource(org.apache.druid.query.TableDataSource) CallbackAction(org.apache.druid.client.ServerView.CallbackAction) BrokerInternalQueryConfig(org.apache.druid.client.BrokerInternalQueryConfig) TimelineCallback(org.apache.druid.client.TimelineServerView.TimelineCallback) Future(java.util.concurrent.Future) MapJoinableFactory(org.apache.druid.segment.join.MapJoinableFactory) Test(org.junit.Test)

Aggregations

BrokerInternalQueryConfig (org.apache.druid.client.BrokerInternalQueryConfig)16 MapJoinableFactory (org.apache.druid.segment.join.MapJoinableFactory)15 NoopEscalator (org.apache.druid.server.security.NoopEscalator)15 DataSegment (org.apache.druid.timeline.DataSegment)13 Test (org.junit.Test)13 CountDownLatch (java.util.concurrent.CountDownLatch)11 DruidServerMetadata (org.apache.druid.server.coordination.DruidServerMetadata)11 DruidTable (org.apache.druid.sql.calcite.table.DruidTable)10 SpecificSegmentsQuerySegmentWalker (org.apache.druid.sql.calcite.util.SpecificSegmentsQuerySegmentWalker)10 TestServerInventoryView (org.apache.druid.sql.calcite.util.TestServerInventoryView)10 SegmentId (org.apache.druid.timeline.SegmentId)10 ImmutableSet (com.google.common.collect.ImmutableSet)9 File (java.io.File)9 Set (java.util.Set)9 GlobalTableDataSource (org.apache.druid.query.GlobalTableDataSource)9 TableDataSource (org.apache.druid.query.TableDataSource)9 CountAggregatorFactory (org.apache.druid.query.aggregation.CountAggregatorFactory)9 DoubleSumAggregatorFactory (org.apache.druid.query.aggregation.DoubleSumAggregatorFactory)9 LongSumAggregatorFactory (org.apache.druid.query.aggregation.LongSumAggregatorFactory)9 HyperUniquesAggregatorFactory (org.apache.druid.query.aggregation.hyperloglog.HyperUniquesAggregatorFactory)9