Use of org.apache.druid.segment.join.MapJoinableFactory in project druid by druid-io.
The class DruidSchemaTest, method testServerSegmentRemovedCallbackRemoveBrokerSegment.
@Test
public void testServerSegmentRemovedCallbackRemoveBrokerSegment() throws InterruptedException
{
  String datasource = "serverSegmentRemoveTest";
  CountDownLatch addSegmentLatch = new CountDownLatch(1);
  CountDownLatch removeServerSegmentLatch = new CountDownLatch(1);
  DruidSchema schema = new DruidSchema(
      CalciteTests.createMockQueryLifecycleFactory(walker, conglomerate),
      serverView,
      segmentManager,
      new MapJoinableFactory(ImmutableSet.of(), ImmutableMap.of()),
      PLANNER_CONFIG_DEFAULT,
      new NoopEscalator(),
      new BrokerInternalQueryConfig(),
      null
  )
  {
    @Override
    protected void addSegment(final DruidServerMetadata server, final DataSegment segment)
    {
      super.addSegment(server, segment);
      if (datasource.equals(segment.getDataSource())) {
        addSegmentLatch.countDown();
      }
    }

    @Override
    void removeServerSegment(final DruidServerMetadata server, final DataSegment segment)
    {
      super.removeServerSegment(server, segment);
      if (datasource.equals(segment.getDataSource())) {
        removeServerSegmentLatch.countDown();
      }
    }
  };

  DataSegment segment = newSegment(datasource, 1);
  serverView.addSegment(segment, ServerType.HISTORICAL);
  serverView.addSegment(segment, ServerType.BROKER);
  Assert.assertTrue(addSegmentLatch.await(1, TimeUnit.SECONDS));

  serverView.removeSegment(segment, ServerType.BROKER);
  Assert.assertTrue(removeServerSegmentLatch.await(1, TimeUnit.SECONDS));

  // Removing the broker replica leaves the historical replica in place, so the total
  // segment count is unchanged, but the datasource is marked for a rebuild.
  Assert.assertEquals(5, schema.getTotalSegments());
  Assert.assertTrue(schema.getDataSourcesNeedingRebuild().contains(datasource));
}
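The tests on this page build segments with a newSegment helper that DruidSchemaTest defines elsewhere. As a rough sketch of what such a helper can look like (the interval, version, dimensions, and metrics below are illustrative assumptions, not the test class's actual values):

private static DataSegment newSegment(String datasource, int partitionId)
{
  // Hypothetical values; only the datasource and partition id vary per call in these tests.
  return new DataSegment(
      datasource,
      Intervals.of("2012/2013"),
      "version1",
      null,
      ImmutableList.of("dim1", "dim2"),
      ImmutableList.of("met1", "met2"),
      new NumberedShardSpec(partitionId, 0),
      null,
      1,
      100L,
      PruneSpecsHolder.DEFAULT
  );
}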
Use of org.apache.druid.segment.join.MapJoinableFactory in project druid by druid-io.
The class DruidSchemaTest, method testRunSegmentMetadataQueryWithContext.
/**
 * Ensure that the BrokerInternalQueryConfig context is honored for this internally generated
 * SegmentMetadata query.
 */
@Test
public void testRunSegmentMetadataQueryWithContext() throws Exception
{
  Map<String, Object> queryContext = ImmutableMap.of("priority", 5);
  String brokerInternalQueryConfigJson = "{\"context\": { \"priority\": 5} }";

  // Round-trip the config through JSON to make sure the context survives (de)serialization.
  BrokerInternalQueryConfig brokerInternalQueryConfig = MAPPER.readValue(
      MAPPER.writeValueAsString(
          MAPPER.readValue(brokerInternalQueryConfigJson, BrokerInternalQueryConfig.class)
      ),
      BrokerInternalQueryConfig.class
  );

  DataSegment segment = newSegment("test", 0);
  List<SegmentId> segmentIterable = ImmutableList.of(segment.getId());

  // This is the query that we expect this method to create. We will be testing that it matches
  // the query generated by the method under test.
  SegmentMetadataQuery expectedMetadataQuery = new SegmentMetadataQuery(
      new TableDataSource(segment.getDataSource()),
      new MultipleSpecificSegmentSpec(
          segmentIterable.stream().map(SegmentId::toDescriptor).collect(Collectors.toList())
      ),
      new AllColumnIncluderator(),
      false,
      queryContext,
      EnumSet.noneOf(SegmentMetadataQuery.AnalysisType.class),
      false,
      false
  );

  QueryLifecycleFactory factoryMock = EasyMock.createMock(QueryLifecycleFactory.class);
  QueryLifecycle lifecycleMock = EasyMock.createMock(QueryLifecycle.class);

  // We need a dedicated schema for this test because the shared schemas don't mock the
  // QueryLifecycleFactory, which this test requires.
  DruidSchema mySchema = new DruidSchema(
      factoryMock,
      serverView,
      segmentManager,
      new MapJoinableFactory(
          ImmutableSet.of(globalTableJoinable),
          ImmutableMap.of(globalTableJoinable.getClass(), GlobalTableDataSource.class)
      ),
      PLANNER_CONFIG_DEFAULT,
      new NoopEscalator(),
      brokerInternalQueryConfig,
      null
  );

  EasyMock.expect(factoryMock.factorize()).andReturn(lifecycleMock).once();

  // This is the meat of the test: making sure that the query created by the method under test
  // matches the expected query, specifically the operator-configured context.
  EasyMock.expect(lifecycleMock.runSimple(expectedMetadataQuery, AllowAllAuthenticator.ALLOW_ALL_RESULT, Access.OK))
          .andReturn(null);

  EasyMock.replay(factoryMock, lifecycleMock);
  mySchema.runSegmentMetadataQuery(segmentIterable);
  EasyMock.verify(factoryMock, lifecycleMock);
}
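The config round-trip at the top of this test is plain Jackson (de)serialization. A minimal standalone sketch, assuming BrokerInternalQueryConfig exposes the parsed context through a getContext() accessor (which is how the priority would reach the internally generated query):

ObjectMapper mapper = new ObjectMapper();
BrokerInternalQueryConfig config = mapper.readValue(
    "{\"context\": { \"priority\": 5} }",
    BrokerInternalQueryConfig.class
);
// The configured priority should survive the round trip and end up in the
// context of every internally generated SegmentMetadata query.
Assert.assertEquals(Integer.valueOf(5), config.getContext().get("priority"));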
Use of org.apache.druid.segment.join.MapJoinableFactory in project druid by druid-io.
The class DruidSchemaTest, method testSegmentAddedCallbackAddNewRealtimeSegment.
@Test
public void testSegmentAddedCallbackAddNewRealtimeSegment() throws InterruptedException
{
  String datasource = "newSegmentAddTest";
  CountDownLatch addSegmentLatch = new CountDownLatch(1);
  DruidSchema schema = new DruidSchema(
      CalciteTests.createMockQueryLifecycleFactory(walker, conglomerate),
      serverView,
      segmentManager,
      new MapJoinableFactory(ImmutableSet.of(), ImmutableMap.of()),
      PLANNER_CONFIG_DEFAULT,
      new NoopEscalator(),
      new BrokerInternalQueryConfig(),
      null
  )
  {
    @Override
    protected void addSegment(final DruidServerMetadata server, final DataSegment segment)
    {
      super.addSegment(server, segment);
      if (datasource.equals(segment.getDataSource())) {
        addSegmentLatch.countDown();
      }
    }
  };

  serverView.addSegment(newSegment(datasource, 1), ServerType.REALTIME);
  Assert.assertTrue(addSegmentLatch.await(1, TimeUnit.SECONDS));

  Assert.assertEquals(5, schema.getTotalSegments());
  List<AvailableSegmentMetadata> metadatas = schema
      .getSegmentMetadataSnapshot()
      .values()
      .stream()
      .filter(metadata -> datasource.equals(metadata.getSegment().getDataSource()))
      .collect(Collectors.toList());
  Assert.assertEquals(1, metadatas.size());

  // A newly added realtime segment is flagged realtime, has no rows yet, and is both
  // mutable and in need of a metadata refresh.
  AvailableSegmentMetadata metadata = metadatas.get(0);
  Assert.assertEquals(1, metadata.isRealtime());
  Assert.assertEquals(0, metadata.getNumRows());
  Assert.assertTrue(schema.getSegmentsNeedingRefresh().contains(metadata.getSegment().getId()));
  Assert.assertTrue(schema.getMutableSegments().contains(metadata.getSegment().getId()));
}
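The CountDownLatch override above exists because server-view callbacks fire asynchronously: the test must block until the schema has actually processed the notification before asserting on it. The pattern in isolation (plain JDK, independent of Druid):

// Generic form of the synchronization used by these tests: the callback thread
// counts the latch down, and the test thread waits with a timeout.
CountDownLatch latch = new CountDownLatch(1);
new Thread(latch::countDown).start(); // stands in for the async addSegment callback
Assert.assertTrue(latch.await(1, TimeUnit.SECONDS));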
Use of org.apache.druid.segment.join.MapJoinableFactory in project druid by druid-io.
The class DruidSchemaTest, method testSegmentAddedCallbackAddNewHistoricalSegment.
@Test
public void testSegmentAddedCallbackAddNewHistoricalSegment() throws InterruptedException
{
  String datasource = "newSegmentAddTest";
  CountDownLatch addSegmentLatch = new CountDownLatch(1);
  DruidSchema schema = new DruidSchema(
      CalciteTests.createMockQueryLifecycleFactory(walker, conglomerate),
      serverView,
      segmentManager,
      new MapJoinableFactory(ImmutableSet.of(), ImmutableMap.of()),
      PLANNER_CONFIG_DEFAULT,
      new NoopEscalator(),
      new BrokerInternalQueryConfig(),
      null
  )
  {
    @Override
    protected void addSegment(final DruidServerMetadata server, final DataSegment segment)
    {
      super.addSegment(server, segment);
      if (datasource.equals(segment.getDataSource())) {
        addSegmentLatch.countDown();
      }
    }
  };

  serverView.addSegment(newSegment(datasource, 1), ServerType.HISTORICAL);
  Assert.assertTrue(addSegmentLatch.await(1, TimeUnit.SECONDS));

  Assert.assertEquals(5, schema.getTotalSegments());
  List<AvailableSegmentMetadata> metadatas = schema
      .getSegmentMetadataSnapshot()
      .values()
      .stream()
      .filter(metadata -> datasource.equals(metadata.getSegment().getDataSource()))
      .collect(Collectors.toList());
  Assert.assertEquals(1, metadatas.size());

  // Unlike the realtime case above, a historical segment is not flagged realtime and is
  // immutable, but it still needs an initial metadata refresh.
  AvailableSegmentMetadata metadata = metadatas.get(0);
  Assert.assertEquals(0, metadata.isRealtime());
  Assert.assertEquals(0, metadata.getNumRows());
  Assert.assertTrue(schema.getSegmentsNeedingRefresh().contains(metadata.getSegment().getId()));
}
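Taken together, the two preceding examples pin down the callback behavior per server type: a REALTIME add yields isRealtime() == 1 and the segment lands in both getSegmentsNeedingRefresh() and getMutableSegments(), while a HISTORICAL add yields isRealtime() == 0 and the segment only needs a refresh. In both cases getNumRows() is 0 until the first metadata refresh runs.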
Use of org.apache.druid.segment.join.MapJoinableFactory in project druid by druid-io.
The class DruidSchemaTest, method setUp.
@Before
public void setUp() throws Exception
{
  final File tmpDir = temporaryFolder.newFolder();
  final QueryableIndex index1 = IndexBuilder
      .create()
      .tmpDir(new File(tmpDir, "1"))
      .segmentWriteOutMediumFactory(OffHeapMemorySegmentWriteOutMediumFactory.instance())
      .schema(
          new IncrementalIndexSchema.Builder()
              .withMetrics(
                  new CountAggregatorFactory("cnt"),
                  new DoubleSumAggregatorFactory("m1", "m1"),
                  new HyperUniquesAggregatorFactory("unique_dim1", "dim1")
              )
              .withRollup(false)
              .build()
      )
      .rows(ROWS1)
      .buildMMappedIndex();

  final QueryableIndex index2 = IndexBuilder
      .create()
      .tmpDir(new File(tmpDir, "2"))
      .segmentWriteOutMediumFactory(OffHeapMemorySegmentWriteOutMediumFactory.instance())
      .schema(
          new IncrementalIndexSchema.Builder()
              .withMetrics(new LongSumAggregatorFactory("m1", "m1"))
              .withRollup(false)
              .build()
      )
      .rows(ROWS2)
      .buildMMappedIndex();

  walker = new SpecificSegmentsQuerySegmentWalker(conglomerate).add(
      DataSegment.builder()
                 .dataSource(CalciteTests.DATASOURCE1)
                 .interval(Intervals.of("2000/P1Y"))
                 .version("1")
                 .shardSpec(new LinearShardSpec(0))
                 .size(0)
                 .build(),
      index1
  ).add(
      DataSegment.builder()
                 .dataSource(CalciteTests.DATASOURCE1)
                 .interval(Intervals.of("2001/P1Y"))
                 .version("1")
                 .shardSpec(new LinearShardSpec(0))
                 .size(0)
                 .build(),
      index2
  ).add(
      DataSegment.builder()
                 .dataSource(CalciteTests.DATASOURCE2)
                 .interval(index2.getDataInterval())
                 .version("1")
                 .shardSpec(new LinearShardSpec(0))
                 .size(0)
                 .build(),
      index2
  );

  final DataSegment segment1 = new DataSegment(
      "foo3",
      Intervals.of("2012/2013"),
      "version3",
      null,
      ImmutableList.of("dim1", "dim2"),
      ImmutableList.of("met1", "met2"),
      new NumberedShardSpec(2, 3),
      null,
      1,
      100L,
      PruneSpecsHolder.DEFAULT
  );
  final List<DataSegment> realtimeSegments = ImmutableList.of(segment1);
  serverView = new TestServerInventoryView(walker.getSegments(), realtimeSegments);
  druidServers = serverView.getDruidServers();

  schema = new DruidSchema(
      CalciteTests.createMockQueryLifecycleFactory(walker, conglomerate),
      serverView,
      segmentManager,
      new MapJoinableFactory(
          ImmutableSet.of(globalTableJoinable),
          ImmutableMap.of(globalTableJoinable.getClass(), GlobalTableDataSource.class)
      ),
      PLANNER_CONFIG_DEFAULT,
      new NoopEscalator(),
      new BrokerInternalQueryConfig(),
      null
  )
  {
    @Override
    protected DruidTable buildDruidTable(String dataSource)
    {
      DruidTable table = super.buildDruidTable(dataSource);
      buildTableLatch.countDown();
      return table;
    }

    @Override
    void markDataSourceAsNeedRebuild(String datasource)
    {
      super.markDataSourceAsNeedRebuild(datasource);
      markDataSourceLatch.countDown();
    }
  };

  // schema2 behaves like schema but throws once on the first refreshSegments() call,
  // simulating a segment-metadata query timeout.
  schema2 = new DruidSchema(
      CalciteTests.createMockQueryLifecycleFactory(walker, conglomerate),
      serverView,
      segmentManager,
      new MapJoinableFactory(
          ImmutableSet.of(globalTableJoinable),
          ImmutableMap.of(globalTableJoinable.getClass(), GlobalTableDataSource.class)
      ),
      PLANNER_CONFIG_DEFAULT,
      new NoopEscalator(),
      new BrokerInternalQueryConfig(),
      null
  )
  {
    boolean throwException = true;

    @Override
    protected DruidTable buildDruidTable(String dataSource)
    {
      DruidTable table = super.buildDruidTable(dataSource);
      buildTableLatch.countDown();
      return table;
    }

    @Override
    protected Set<SegmentId> refreshSegments(final Set<SegmentId> segments) throws IOException
    {
      if (throwException) {
        throwException = false;
        throw new RuntimeException("Query[xxxx] url[http://xxxx:8083/druid/v2/] timed out.");
      } else {
        return super.refreshSegments(segments);
      }
    }

    @Override
    void markDataSourceAsNeedRebuild(String datasource)
    {
      super.markDataSourceAsNeedRebuild(datasource);
      markDataSourceLatch.countDown();
    }
  };

  schema.start();
  schema.awaitInitialization();
}
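Every example on this page feeds DruidSchema a MapJoinableFactory, the class this page is about. Judging from the two call sites above, its constructor takes the set of JoinableFactory instances plus a map from each factory's class to the DataSource class it serves; a sketch of both variants used here:

// No joinables at all: join-related lookups simply resolve to nothing.
JoinableFactory empty = new MapJoinableFactory(ImmutableSet.of(), ImmutableMap.of());

// Route GlobalTableDataSource lookups to globalTableJoinable, as setUp() does above.
JoinableFactory withGlobal = new MapJoinableFactory(
    ImmutableSet.of(globalTableJoinable),
    ImmutableMap.of(globalTableJoinable.getClass(), GlobalTableDataSource.class)
);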