Use of org.apache.druid.server.security.NoopEscalator in project druid by druid-io.
From the class DruidSchemaTest, method testSegmentAddedCallbackAddNewRealtimeSegment. The test verifies that when a new realtime segment is announced, DruidSchema counts it, reports it as realtime with zero rows, and tracks it as both needing refresh and mutable.
@Test
public void testSegmentAddedCallbackAddNewRealtimeSegment() throws InterruptedException {
  String datasource = "newSegmentAddTest";
  CountDownLatch addSegmentLatch = new CountDownLatch(1);
  DruidSchema schema = new DruidSchema(
      CalciteTests.createMockQueryLifecycleFactory(walker, conglomerate),
      serverView,
      segmentManager,
      new MapJoinableFactory(ImmutableSet.of(), ImmutableMap.of()),
      PLANNER_CONFIG_DEFAULT,
      new NoopEscalator(),
      new BrokerInternalQueryConfig(),
      null
  ) {
    @Override
    protected void addSegment(final DruidServerMetadata server, final DataSegment segment) {
      super.addSegment(server, segment);
      if (datasource.equals(segment.getDataSource())) {
        addSegmentLatch.countDown();
      }
    }
  };
  serverView.addSegment(newSegment(datasource, 1), ServerType.REALTIME);
  Assert.assertTrue(addSegmentLatch.await(1, TimeUnit.SECONDS));
  Assert.assertEquals(5, schema.getTotalSegments());
  List<AvailableSegmentMetadata> metadatas = schema
      .getSegmentMetadataSnapshot()
      .values()
      .stream()
      .filter(metadata -> datasource.equals(metadata.getSegment().getDataSource()))
      .collect(Collectors.toList());
  Assert.assertEquals(1, metadatas.size());
  AvailableSegmentMetadata metadata = metadatas.get(0);
  Assert.assertEquals(1, metadata.isRealtime());
  Assert.assertEquals(0, metadata.getNumRows());
  Assert.assertTrue(schema.getSegmentsNeedingRefresh().contains(metadata.getSegment().getId()));
  Assert.assertTrue(schema.getMutableSegments().contains(metadata.getSegment().getId()));
}
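The newSegment helper invoked above is not part of this excerpt. A plausible, purely hypothetical reconstruction is shown below; it mirrors the 11-argument DataSegment constructor used for segment1 in setUp later on this page, so the interval, version, dimension, and metric values are assumptions rather than the project's actual code:

  private static DataSegment newSegment(String datasource, int partitionId) {
    // Hypothetical helper; values mirror the segment1 construction in setUp.
    return new DataSegment(
        datasource,
        Intervals.of("2012/2013"),
        "version1",
        null,
        ImmutableList.of("dim1", "dim2"),
        ImmutableList.of("met1", "met2"),
        new NumberedShardSpec(partitionId, 0),
        null,
        1,    // binary version
        100L, // size in bytes
        PruneSpecsHolder.DEFAULT
    );
  }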
Use of org.apache.druid.server.security.NoopEscalator in project druid by druid-io.
From the class DruidSchemaTest, method testSegmentAddedCallbackAddNewHistoricalSegment. This is the historical counterpart of the previous test: the new segment is flagged for refresh but reported as non-realtime, and no mutability assertion is made.
@Test
public void testSegmentAddedCallbackAddNewHistoricalSegment() throws InterruptedException {
  String datasource = "newSegmentAddTest";
  CountDownLatch addSegmentLatch = new CountDownLatch(1);
  DruidSchema schema = new DruidSchema(
      CalciteTests.createMockQueryLifecycleFactory(walker, conglomerate),
      serverView,
      segmentManager,
      new MapJoinableFactory(ImmutableSet.of(), ImmutableMap.of()),
      PLANNER_CONFIG_DEFAULT,
      new NoopEscalator(),
      new BrokerInternalQueryConfig(),
      null
  ) {
    @Override
    protected void addSegment(final DruidServerMetadata server, final DataSegment segment) {
      super.addSegment(server, segment);
      if (datasource.equals(segment.getDataSource())) {
        addSegmentLatch.countDown();
      }
    }
  };
  serverView.addSegment(newSegment(datasource, 1), ServerType.HISTORICAL);
  Assert.assertTrue(addSegmentLatch.await(1, TimeUnit.SECONDS));
  Assert.assertEquals(5, schema.getTotalSegments());
  List<AvailableSegmentMetadata> metadatas = schema
      .getSegmentMetadataSnapshot()
      .values()
      .stream()
      .filter(metadata -> datasource.equals(metadata.getSegment().getDataSource()))
      .collect(Collectors.toList());
  Assert.assertEquals(1, metadatas.size());
  AvailableSegmentMetadata metadata = metadatas.get(0);
  Assert.assertEquals(0, metadata.isRealtime());
  Assert.assertEquals(0, metadata.getNumRows());
  Assert.assertTrue(schema.getSegmentsNeedingRefresh().contains(metadata.getSegment().getId()));
}
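Both tests hand a NoopEscalator to the DruidSchema constructor. As a minimal sketch of what that collaborator provides, assuming the two methods declared on Druid's Escalator interface (baseHttpClient stands in for an existing client instance):

  // Minimal sketch, assuming Escalator exposes createEscalatedAuthenticationResult()
  // and createEscalatedClient(HttpClient). NoopEscalator performs no real credential
  // escalation: the "escalated" identity is a permissive internal one, and the
  // HTTP client comes back unchanged.
  Escalator escalator = new NoopEscalator();
  AuthenticationResult authResult = escalator.createEscalatedAuthenticationResult();
  HttpClient escalatedClient = escalator.createEscalatedClient(baseHttpClient);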
Use of org.apache.druid.server.security.NoopEscalator in project druid by druid-io.
From the class DruidSchemaTest, method setUp. This fixture builds two mmapped indexes, a segment walker, and a test server inventory view, then constructs the two DruidSchema instances used by the tests; the second instance deliberately fails its first refreshSegments call to exercise retry handling.
@Before
public void setUp() throws Exception {
  final File tmpDir = temporaryFolder.newFolder();
  final QueryableIndex index1 = IndexBuilder
      .create()
      .tmpDir(new File(tmpDir, "1"))
      .segmentWriteOutMediumFactory(OffHeapMemorySegmentWriteOutMediumFactory.instance())
      .schema(
          new IncrementalIndexSchema.Builder()
              .withMetrics(
                  new CountAggregatorFactory("cnt"),
                  new DoubleSumAggregatorFactory("m1", "m1"),
                  new HyperUniquesAggregatorFactory("unique_dim1", "dim1")
              )
              .withRollup(false)
              .build()
      )
      .rows(ROWS1)
      .buildMMappedIndex();
  final QueryableIndex index2 = IndexBuilder
      .create()
      .tmpDir(new File(tmpDir, "2"))
      .segmentWriteOutMediumFactory(OffHeapMemorySegmentWriteOutMediumFactory.instance())
      .schema(
          new IncrementalIndexSchema.Builder()
              .withMetrics(new LongSumAggregatorFactory("m1", "m1"))
              .withRollup(false)
              .build()
      )
      .rows(ROWS2)
      .buildMMappedIndex();
  walker = new SpecificSegmentsQuerySegmentWalker(conglomerate)
      .add(
          DataSegment.builder()
                     .dataSource(CalciteTests.DATASOURCE1)
                     .interval(Intervals.of("2000/P1Y"))
                     .version("1")
                     .shardSpec(new LinearShardSpec(0))
                     .size(0)
                     .build(),
          index1
      )
      .add(
          DataSegment.builder()
                     .dataSource(CalciteTests.DATASOURCE1)
                     .interval(Intervals.of("2001/P1Y"))
                     .version("1")
                     .shardSpec(new LinearShardSpec(0))
                     .size(0)
                     .build(),
          index2
      )
      .add(
          DataSegment.builder()
                     .dataSource(CalciteTests.DATASOURCE2)
                     .interval(index2.getDataInterval())
                     .version("1")
                     .shardSpec(new LinearShardSpec(0))
                     .size(0)
                     .build(),
          index2
      );
  // A standalone realtime segment served alongside the historical segments above.
  final DataSegment segment1 = new DataSegment(
      "foo3",
      Intervals.of("2012/2013"),
      "version3",
      null,
      ImmutableList.of("dim1", "dim2"),
      ImmutableList.of("met1", "met2"),
      new NumberedShardSpec(2, 3),
      null,
      1,
      100L,
      PruneSpecsHolder.DEFAULT
  );
  final List<DataSegment> realtimeSegments = ImmutableList.of(segment1);
  serverView = new TestServerInventoryView(walker.getSegments(), realtimeSegments);
  druidServers = serverView.getDruidServers();
  schema = new DruidSchema(
      CalciteTests.createMockQueryLifecycleFactory(walker, conglomerate),
      serverView,
      segmentManager,
      new MapJoinableFactory(
          ImmutableSet.of(globalTableJoinable),
          ImmutableMap.of(globalTableJoinable.getClass(), GlobalTableDataSource.class)
      ),
      PLANNER_CONFIG_DEFAULT,
      new NoopEscalator(),
      new BrokerInternalQueryConfig(),
      null
  ) {
    @Override
    protected DruidTable buildDruidTable(String dataSource) {
      DruidTable table = super.buildDruidTable(dataSource);
      buildTableLatch.countDown();
      return table;
    }

    @Override
    void markDataSourceAsNeedRebuild(String datasource) {
      super.markDataSourceAsNeedRebuild(datasource);
      markDataSourceLatch.countDown();
    }
  };
  schema2 = new DruidSchema(
      CalciteTests.createMockQueryLifecycleFactory(walker, conglomerate),
      serverView,
      segmentManager,
      new MapJoinableFactory(
          ImmutableSet.of(globalTableJoinable),
          ImmutableMap.of(globalTableJoinable.getClass(), GlobalTableDataSource.class)
      ),
      PLANNER_CONFIG_DEFAULT,
      new NoopEscalator(),
      new BrokerInternalQueryConfig(),
      null
  ) {
    boolean throwException = true;

    @Override
    protected DruidTable buildDruidTable(String dataSource) {
      DruidTable table = super.buildDruidTable(dataSource);
      buildTableLatch.countDown();
      return table;
    }

    @Override
    protected Set<SegmentId> refreshSegments(final Set<SegmentId> segments) throws IOException {
      if (throwException) {
        // Fail exactly once to simulate a transient segment-metadata query timeout.
        throwException = false;
        throw new RuntimeException("Query[xxxx] url[http://xxxx:8083/druid/v2/] timed out.");
      } else {
        return super.refreshSegments(segments);
      }
    }

    @Override
    void markDataSourceAsNeedRebuild(String datasource) {
      super.markDataSourceAsNeedRebuild(datasource);
      markDataSourceLatch.countDown();
    }
  };
  schema.start();
  schema.awaitInitialization();
}
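The buildTableLatch and markDataSourceLatch fields counted down in the overrides above are declared elsewhere in the test class; presumably something like the following, with the initial counts being an assumption:

  // Assumed declarations: each override above counts one of these down so a
  // test can block until the schema has built a table or flagged a rebuild.
  private CountDownLatch buildTableLatch = new CountDownLatch(1);
  private CountDownLatch markDataSourceLatch = new CountDownLatch(1);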
Use of org.apache.druid.server.security.NoopEscalator in project druid by druid-io.
From the class DruidSchemaNoDataInitTest, method testInitializationWithNoData. The test verifies that a DruidSchema backed by an empty inventory view initializes cleanly to an empty table map.
@Test
public void testInitializationWithNoData() throws Exception {
  try (final Closer closer = Closer.create()) {
    final QueryRunnerFactoryConglomerate conglomerate = QueryStackTests.createQueryRunnerFactoryConglomerate(closer);
    final DruidSchema druidSchema = new DruidSchema(
        CalciteTests.createMockQueryLifecycleFactory(new SpecificSegmentsQuerySegmentWalker(conglomerate), conglomerate),
        new TestServerInventoryView(Collections.emptyList()),
        new SegmentManager(EasyMock.createMock(SegmentLoader.class)),
        new MapJoinableFactory(ImmutableSet.of(), ImmutableMap.of()),
        PLANNER_CONFIG_DEFAULT,
        new NoopEscalator(),
        new BrokerInternalQueryConfig(),
        null
    );
    druidSchema.start();
    druidSchema.awaitInitialization();
    Assert.assertEquals(ImmutableMap.of(), druidSchema.getTableMap());
  }
}
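Note that the SegmentManager wraps an unconfigured EasyMock mock of SegmentLoader; the no-data path presumably never touches it. A follow-on check one could add inside the same try-with-resources block (hypothetical, not part of the original test) would confirm the segment count is zero as well:

  // Hypothetical extra assertion: no servers, no segments.
  Assert.assertEquals(0, druidSchema.getTotalSegments());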
Use of org.apache.druid.server.security.NoopEscalator in project druid by druid-io.
From the class SqlModuleTest, method makeInjectorWithProperties. This helper builds a Guice injector from the test's mock dependencies plus SqlModule, binding Escalator to a NoopEscalator so no real credential escalation occurs.
private Injector makeInjectorWithProperties(final Properties props) {
  return Guice.createInjector(
      ImmutableList.of(
          new DruidGuiceExtensions(),
          new LifecycleModule(),
          new ServerModule(),
          new JacksonModule(),
          (Module) binder -> {
            binder.bind(Validator.class).toInstance(Validation.buildDefaultValidatorFactory().getValidator());
            binder.bind(JsonConfigurator.class).in(LazySingleton.class);
            binder.bind(Properties.class).toInstance(props);
            binder.bind(ExprMacroTable.class).toInstance(ExprMacroTable.nil());
            binder.bind(AuthorizerMapper.class).toInstance(CalciteTests.TEST_AUTHORIZER_MAPPER);
            binder.bind(Escalator.class).toInstance(new NoopEscalator());
            binder.bind(ServiceEmitter.class).toInstance(serviceEmitter);
            binder.bind(RequestLogger.class).toInstance(new NoopRequestLogger());
            binder.bind(new TypeLiteral<Supplier<DefaultQueryConfig>>() {}).toInstance(Suppliers.ofInstance(new DefaultQueryConfig(null)));
            binder.bind(FilteredServerInventoryView.class).toInstance(inventoryView);
            binder.bind(TimelineServerView.class).toInstance(timelineServerView);
            binder.bind(DruidLeaderClient.class).annotatedWith(Coordinator.class).toInstance(druidLeaderClient);
            binder.bind(DruidLeaderClient.class).annotatedWith(IndexingService.class).toInstance(druidLeaderClient);
            binder.bind(DruidNodeDiscoveryProvider.class).toInstance(druidNodeDiscoveryProvider);
            binder.bind(GenericQueryMetricsFactory.class).toInstance(genericQueryMetricsFactory);
            binder.bind(QuerySegmentWalker.class).toInstance(querySegmentWalker);
            binder.bind(QueryToolChestWarehouse.class).toInstance(queryToolChestWarehouse);
            binder.bind(LookupExtractorFactoryContainerProvider.class).toInstance(lookupExtractorFactoryContainerProvider);
            binder.bind(JoinableFactory.class).toInstance(joinableFactory);
            binder.bind(SegmentLoader.class).toInstance(segmentLoader);
            binder.bind(QuerySchedulerProvider.class).in(LazySingleton.class);
            binder.bind(QueryScheduler.class).toProvider(QuerySchedulerProvider.class).in(LazySingleton.class);
          },
          new SqlModule(props),
          new TestViewManagerModule()
      )
  );
}
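A hedged usage sketch for this helper: build a Properties object enabling SQL, create the injector, and pull out a binding contributed by SqlModule. The property key druid.sql.enable and the availability of a PlannerConfig binding are assumptions about SqlModule's configuration surface, not something shown in this excerpt:

  // Sketch only; the property name and PlannerConfig binding are assumptions.
  Properties props = new Properties();
  props.setProperty("druid.sql.enable", "true");
  Injector injector = makeInjectorWithProperties(props);
  PlannerConfig plannerConfig = injector.getInstance(PlannerConfig.class);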