Use of org.apache.druid.query.TableDataSource in project druid by druid-io.
In class CoordinatorServerViewTest, method testMultipleServerAddedRemovedSegment:
@Test
public void testMultipleServerAddedRemovedSegment() throws Exception
{
  segmentViewInitLatch = new CountDownLatch(1);
  segmentAddedLatch = new CountDownLatch(5);

  // temporarily set latch count to 1
  segmentRemovedLatch = new CountDownLatch(1);

  setupViews();

  final List<DruidServer> druidServers = Lists.transform(
      ImmutableList.of("localhost:0", "localhost:1", "localhost:2", "localhost:3", "localhost:4"),
      new Function<String, DruidServer>()
      {
        @Override
        public DruidServer apply(String input)
        {
          return new DruidServer(input, input, null, 10000000L, ServerType.HISTORICAL, "default_tier", 0);
        }
      }
  );

  for (DruidServer druidServer : druidServers) {
    setupZNodeForServer(druidServer, zkPathsConfig, jsonMapper);
  }

  final List<DataSegment> segments = Lists.transform(
      ImmutableList.of(
          Pair.of("2011-04-01/2011-04-03", "v1"),
          Pair.of("2011-04-03/2011-04-06", "v1"),
          Pair.of("2011-04-01/2011-04-09", "v2"),
          Pair.of("2011-04-06/2011-04-09", "v3"),
          Pair.of("2011-04-01/2011-04-02", "v3")
      ),
      new Function<Pair<String, String>, DataSegment>()
      {
        @Override
        public DataSegment apply(Pair<String, String> input)
        {
          return dataSegmentWithIntervalAndVersion(input.lhs, input.rhs);
        }
      }
  );

  for (int i = 0; i < 5; ++i) {
    announceSegmentForServer(druidServers.get(i), segments.get(i), zkPathsConfig, jsonMapper);
  }

  Assert.assertTrue(timing.forWaiting().awaitLatch(segmentViewInitLatch));
  Assert.assertTrue(timing.forWaiting().awaitLatch(segmentAddedLatch));

  TimelineLookup timeline = overlordServerView.getTimeline(new TableDataSource("test_overlord_server_view"));
  assertValues(
      Arrays.asList(
          createExpected("2011-04-01/2011-04-02", "v3", druidServers.get(4), segments.get(4)),
          createExpected("2011-04-02/2011-04-06", "v2", druidServers.get(2), segments.get(2)),
          createExpected("2011-04-06/2011-04-09", "v3", druidServers.get(3), segments.get(3))
      ),
      (List<TimelineObjectHolder>) timeline.lookup(Intervals.of("2011-04-01/2011-04-09"))
  );

  // unannounce the segment created by dataSegmentWithIntervalAndVersion("2011-04-01/2011-04-09", "v2")
  unannounceSegmentForServer(druidServers.get(2), segments.get(2));
  Assert.assertTrue(timing.forWaiting().awaitLatch(segmentRemovedLatch));

  // renew segmentRemovedLatch since we still have 4 segments to unannounce
  segmentRemovedLatch = new CountDownLatch(4);

  timeline = overlordServerView.getTimeline(new TableDataSource("test_overlord_server_view"));
  assertValues(
      Arrays.asList(
          createExpected("2011-04-01/2011-04-02", "v3", druidServers.get(4), segments.get(4)),
          createExpected("2011-04-02/2011-04-03", "v1", druidServers.get(0), segments.get(0)),
          createExpected("2011-04-03/2011-04-06", "v1", druidServers.get(1), segments.get(1)),
          createExpected("2011-04-06/2011-04-09", "v3", druidServers.get(3), segments.get(3))
      ),
      (List<TimelineObjectHolder>) timeline.lookup(Intervals.of("2011-04-01/2011-04-09"))
  );

  // unannounce all the segments
  for (int i = 0; i < 5; ++i) {
    // skip the one that was previously unannounced
    if (i != 2) {
      unannounceSegmentForServer(druidServers.get(i), segments.get(i));
    }
  }
  Assert.assertTrue(timing.forWaiting().awaitLatch(segmentRemovedLatch));
  Assert.assertEquals(0, ((List<TimelineObjectHolder>) timeline.lookup(Intervals.of("2011-04-01/2011-04-09"))).size());
}
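The expected intervals above follow from Druid's version-based overshadowing rule: within an overlapping interval the higher version wins, and any sub-intervals it does not cover fall back to lower versions. Below is a minimal, standalone sketch of that rule using VersionedIntervalTimeline directly; it assumes the Druid timeline classes and Guava are on the classpath, and it uses SingleElementPartitionChunk purely for illustration.

import com.google.common.collect.Ordering;
import org.apache.druid.java.util.common.Intervals;
import org.apache.druid.timeline.TimelineObjectHolder;
import org.apache.druid.timeline.VersionedIntervalTimeline;
import org.apache.druid.timeline.partition.SingleElementPartitionChunk;

public class OvershadowingSketch
{
  public static void main(String[] args)
  {
    VersionedIntervalTimeline<String, String> timeline = new VersionedIntervalTimeline<>(Ordering.natural());

    // Same intervals and versions as the test's segments 2 and 4.
    timeline.add(Intervals.of("2011-04-01/2011-04-09"), "v2", new SingleElementPartitionChunk<>("segment-v2"));
    timeline.add(Intervals.of("2011-04-01/2011-04-02"), "v3", new SingleElementPartitionChunk<>("segment-v3"));

    // lookup() splits the range: v3 covers 04-01/04-02, v2 covers the remainder.
    for (TimelineObjectHolder<String, String> holder : timeline.lookup(Intervals.of("2011-04-01/2011-04-09"))) {
      System.out.println(holder.getInterval() + " -> " + holder.getVersion());
    }
  }
}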
Use of org.apache.druid.query.TableDataSource in project druid by druid-io.
In class BrokerServerViewTest, method testRealtimeTasksNotWatched:
@Test
public void testRealtimeTasksNotWatched() throws Exception
{
  segmentViewInitLatch = new CountDownLatch(1);
  segmentAddedLatch = new CountDownLatch(4);
  segmentRemovedLatch = new CountDownLatch(0);

  // Setup a Broker that watches only Historicals
  setupViews(null, null, false);

  // Historical has segments 2 and 3, Realtime has segments 1 and 2
  final DruidServer realtimeServer = setupDruidServer(ServerType.INDEXER_EXECUTOR, null, "realtime:1", 1);
  final DruidServer historicalServer = setupHistoricalServer("tier1", "historical:2", 1);

  final DataSegment segment1 = dataSegmentWithIntervalAndVersion("2020-01-01/P1D", "v1");
  announceSegmentForServer(realtimeServer, segment1, zkPathsConfig, jsonMapper);

  final DataSegment segment2 = dataSegmentWithIntervalAndVersion("2020-01-02/P1D", "v1");
  announceSegmentForServer(realtimeServer, segment2, zkPathsConfig, jsonMapper);
  announceSegmentForServer(historicalServer, segment2, zkPathsConfig, jsonMapper);

  final DataSegment segment3 = dataSegmentWithIntervalAndVersion("2020-01-03/P1D", "v1");
  announceSegmentForServer(historicalServer, segment3, zkPathsConfig, jsonMapper);

  // Wait for the segments to be added
  Assert.assertTrue(timing.forWaiting().awaitLatch(segmentViewInitLatch));
  Assert.assertTrue(timing.forWaiting().awaitLatch(segmentAddedLatch));

  // Get the timeline for the datasource
  TimelineLookup<String, ServerSelector> timeline = brokerServerView.getTimeline(
      DataSourceAnalysis.forDataSource(new TableDataSource(segment1.getDataSource()))
  ).get();

  // Verify that the timeline has no entry for the interval of segment 1
  Assert.assertTrue(timeline.lookup(segment1.getInterval()).isEmpty());

  // Verify that there is one entry for the interval of segment 2
  List<TimelineObjectHolder<String, ServerSelector>> timelineHolders = timeline.lookup(segment2.getInterval());
  Assert.assertEquals(1, timelineHolders.size());

  TimelineObjectHolder<String, ServerSelector> timelineHolder = timelineHolders.get(0);
  Assert.assertEquals(segment2.getInterval(), timelineHolder.getInterval());
  Assert.assertEquals(segment2.getVersion(), timelineHolder.getVersion());

  PartitionHolder<ServerSelector> partitionHolder = timelineHolder.getObject();
  Assert.assertTrue(partitionHolder.isComplete());
  Assert.assertEquals(1, Iterables.size(partitionHolder));

  ServerSelector selector = (partitionHolder.iterator().next()).getObject();
  Assert.assertFalse(selector.isEmpty());
  Assert.assertEquals(segment2, selector.getSegment());

  // Verify that the ServerSelector always picks the Historical server
  for (int i = 0; i < 5; ++i) {
    Assert.assertEquals(historicalServer, selector.pick(null).getServer());
  }
  Assert.assertEquals(Collections.singletonList(historicalServer.getMetadata()), selector.getCandidates(2));
}
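All of these tests synchronize with the asynchronous server view through CountDownLatch fields: each view callback counts a latch down once per event, and the test thread awaits the latch instead of sleeping. Here is a minimal, self-contained sketch of that pattern using only the JDK; the callback name is hypothetical, since the real tests wire the callbacks up inside setupViews().

import java.util.concurrent.CountDownLatch;
import java.util.concurrent.TimeUnit;

public class LatchPatternSketch
{
  public static void main(String[] args) throws InterruptedException
  {
    // Expect four "segment added" callbacks before the test may proceed.
    CountDownLatch segmentAddedLatch = new CountDownLatch(4);

    // Hypothetical stand-in for the server-view callback; each announcement counts down once.
    Runnable onSegmentAdded = segmentAddedLatch::countDown;
    for (int i = 0; i < 4; i++) {
      onSegmentAdded.run();
    }

    // Await with a timeout so a missed callback fails the test instead of hanging it.
    if (!segmentAddedLatch.await(30, TimeUnit.SECONDS)) {
      throw new AssertionError("timed out waiting for segment announcements");
    }
  }
}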
Use of org.apache.druid.query.TableDataSource in project druid by druid-io.
In class BrokerServerViewTest, method testMultipleServerAddedRemovedSegment:
@Test
public void testMultipleServerAddedRemovedSegment() throws Exception
{
  segmentViewInitLatch = new CountDownLatch(1);
  segmentAddedLatch = new CountDownLatch(5);

  // temporarily set latch count to 1
  segmentRemovedLatch = new CountDownLatch(1);

  setupViews();

  final List<DruidServer> druidServers = Lists.transform(
      ImmutableList.of("localhost:0", "localhost:1", "localhost:2", "localhost:3", "localhost:4"),
      hostname -> setupHistoricalServer("default_tier", hostname, 0)
  );

  final List<DataSegment> segments = Lists.transform(
      ImmutableList.of(
          Pair.of("2011-04-01/2011-04-03", "v1"),
          Pair.of("2011-04-03/2011-04-06", "v1"),
          Pair.of("2011-04-01/2011-04-09", "v2"),
          Pair.of("2011-04-06/2011-04-09", "v3"),
          Pair.of("2011-04-01/2011-04-02", "v3")
      ),
      input -> dataSegmentWithIntervalAndVersion(input.lhs, input.rhs)
  );

  for (int i = 0; i < 5; ++i) {
    announceSegmentForServer(druidServers.get(i), segments.get(i), zkPathsConfig, jsonMapper);
  }

  Assert.assertTrue(timing.forWaiting().awaitLatch(segmentViewInitLatch));
  Assert.assertTrue(timing.forWaiting().awaitLatch(segmentAddedLatch));

  TimelineLookup timeline = brokerServerView.getTimeline(
      DataSourceAnalysis.forDataSource(new TableDataSource("test_broker_server_view"))
  ).get();
  assertValues(
      Arrays.asList(
          createExpected("2011-04-01/2011-04-02", "v3", druidServers.get(4), segments.get(4)),
          createExpected("2011-04-02/2011-04-06", "v2", druidServers.get(2), segments.get(2)),
          createExpected("2011-04-06/2011-04-09", "v3", druidServers.get(3), segments.get(3))
      ),
      (List<TimelineObjectHolder>) timeline.lookup(Intervals.of("2011-04-01/2011-04-09"))
  );

  // unannounce the segment created by dataSegmentWithIntervalAndVersion("2011-04-01/2011-04-09", "v2")
  unannounceSegmentForServer(druidServers.get(2), segments.get(2), zkPathsConfig);
  Assert.assertTrue(timing.forWaiting().awaitLatch(segmentRemovedLatch));

  // renew segmentRemovedLatch since we still have 4 segments to unannounce
  segmentRemovedLatch = new CountDownLatch(4);

  timeline = brokerServerView.getTimeline(
      DataSourceAnalysis.forDataSource(new TableDataSource("test_broker_server_view"))
  ).get();
  assertValues(
      Arrays.asList(
          createExpected("2011-04-01/2011-04-02", "v3", druidServers.get(4), segments.get(4)),
          createExpected("2011-04-02/2011-04-03", "v1", druidServers.get(0), segments.get(0)),
          createExpected("2011-04-03/2011-04-06", "v1", druidServers.get(1), segments.get(1)),
          createExpected("2011-04-06/2011-04-09", "v3", druidServers.get(3), segments.get(3))
      ),
      (List<TimelineObjectHolder>) timeline.lookup(Intervals.of("2011-04-01/2011-04-09"))
  );

  // unannounce all the segments
  for (int i = 0; i < 5; ++i) {
    // skip the one that was previously unannounced
    if (i != 2) {
      unannounceSegmentForServer(druidServers.get(i), segments.get(i), zkPathsConfig);
    }
  }
  Assert.assertTrue(timing.forWaiting().awaitLatch(segmentRemovedLatch));
  Assert.assertEquals(0, ((List<TimelineObjectHolder>) timeline.lookup(Intervals.of("2011-04-01/2011-04-09"))).size());
}
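One subtlety worth noting in this variant: Guava's Lists.transform returns a lazy view, so the transform function runs on every element access rather than once up front. Since setupHistoricalServer has side effects, a caller that accesses the same index repeatedly will re-invoke it; materializing the view makes the invocation count explicit. A small standalone demonstration of the laziness:

import com.google.common.collect.ImmutableList;
import com.google.common.collect.Lists;
import java.util.List;
import java.util.concurrent.atomic.AtomicInteger;

public class LazyTransformSketch
{
  public static void main(String[] args)
  {
    AtomicInteger calls = new AtomicInteger();
    List<String> view = Lists.transform(
        ImmutableList.of("localhost:0", "localhost:1"),
        host -> {
          calls.incrementAndGet();
          return host.toUpperCase();
        }
    );

    view.get(0);
    view.get(0);
    System.out.println(calls.get()); // prints 2: the function ran on each access

    // Copying materializes the view, so a side-effecting transform runs exactly once per element.
    List<String> materialized = ImmutableList.copyOf(view);
    System.out.println(calls.get()); // now 4: copyOf touched both elements once more
  }
}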
Use of org.apache.druid.query.TableDataSource in project druid by druid-io.
In class BrokerServerViewTest, method testMultipleServerAndBroker:
@Test
public void testMultipleServerAndBroker() throws Exception
{
  segmentViewInitLatch = new CountDownLatch(1);
  segmentAddedLatch = new CountDownLatch(6);

  // temporarily set latch count to 1
  segmentRemovedLatch = new CountDownLatch(1);

  setupViews();

  final DruidServer druidBroker = new DruidServer("localhost:5", "localhost:5", null, 10000000L, ServerType.BROKER, "default_tier", 0);
  final List<DruidServer> druidServers = Lists.transform(
      ImmutableList.of("localhost:0", "localhost:1", "localhost:2", "localhost:3", "localhost:4"),
      hostname -> setupHistoricalServer("default_tier", hostname, 0)
  );
  setupZNodeForServer(druidBroker, zkPathsConfig, jsonMapper);

  final List<DataSegment> segments = Lists.transform(
      ImmutableList.of(
          Pair.of("2011-04-01/2011-04-03", "v1"),
          Pair.of("2011-04-03/2011-04-06", "v1"),
          Pair.of("2011-04-01/2011-04-09", "v2"),
          Pair.of("2011-04-06/2011-04-09", "v3"),
          Pair.of("2011-04-01/2011-04-02", "v3")
      ),
      input -> dataSegmentWithIntervalAndVersion(input.lhs, input.rhs)
  );

  DataSegment brokerSegment = dataSegmentWithIntervalAndVersion("2011-04-01/2011-04-11", "v4");
  announceSegmentForServer(druidBroker, brokerSegment, zkPathsConfig, jsonMapper);
  for (int i = 0; i < 5; ++i) {
    announceSegmentForServer(druidServers.get(i), segments.get(i), zkPathsConfig, jsonMapper);
  }

  Assert.assertTrue(timing.forWaiting().awaitLatch(segmentViewInitLatch));
  Assert.assertTrue(timing.forWaiting().awaitLatch(segmentAddedLatch));

  TimelineLookup timeline = brokerServerView.getTimeline(
      DataSourceAnalysis.forDataSource(new TableDataSource("test_broker_server_view"))
  ).get();
  assertValues(
      Arrays.asList(
          createExpected("2011-04-01/2011-04-02", "v3", druidServers.get(4), segments.get(4)),
          createExpected("2011-04-02/2011-04-06", "v2", druidServers.get(2), segments.get(2)),
          createExpected("2011-04-06/2011-04-09", "v3", druidServers.get(3), segments.get(3))
      ),
      (List<TimelineObjectHolder>) timeline.lookup(Intervals.of("2011-04-01/2011-04-09"))
  );

  // unannouncing the broker segment should do nothing to announcements
  unannounceSegmentForServer(druidBroker, brokerSegment, zkPathsConfig);
  Assert.assertTrue(timing.forWaiting().awaitLatch(segmentRemovedLatch));

  // renew segmentRemovedLatch since we still have 5 segments to unannounce
  segmentRemovedLatch = new CountDownLatch(5);

  timeline = brokerServerView.getTimeline(
      DataSourceAnalysis.forDataSource(new TableDataSource("test_broker_server_view"))
  ).get();

  // expect the same set of segments as before
  assertValues(
      Arrays.asList(
          createExpected("2011-04-01/2011-04-02", "v3", druidServers.get(4), segments.get(4)),
          createExpected("2011-04-02/2011-04-06", "v2", druidServers.get(2), segments.get(2)),
          createExpected("2011-04-06/2011-04-09", "v3", druidServers.get(3), segments.get(3))
      ),
      (List<TimelineObjectHolder>) timeline.lookup(Intervals.of("2011-04-01/2011-04-09"))
  );

  // unannounce all the segments
  for (int i = 0; i < 5; ++i) {
    unannounceSegmentForServer(druidServers.get(i), segments.get(i), zkPathsConfig);
  }
  Assert.assertTrue(timing.forWaiting().awaitLatch(segmentRemovedLatch));
}
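The test passes because the broker's segment announcement never reaches the timeline: only segment-serving server types are watched. The sketch below is a hypothetical illustration of that filtering idea, not Druid's actual implementation (the real behavior comes from the broker's server-view and segment-watcher configuration); the enum and helper are stand-ins defined here for the example.

import java.util.EnumSet;
import java.util.Set;

public class WatchedServerTypesSketch
{
  // Hypothetical stand-in for org.apache.druid.server.coordination.ServerType.
  enum ServerType { HISTORICAL, REALTIME, INDEXER_EXECUTOR, BROKER }

  // Hypothetical rule: brokers do not serve segments, so their announcements are skipped.
  static final Set<ServerType> WATCHED = EnumSet.of(ServerType.HISTORICAL, ServerType.REALTIME, ServerType.INDEXER_EXECUTOR);

  static boolean shouldAddToTimeline(ServerType announcer)
  {
    return WATCHED.contains(announcer);
  }

  public static void main(String[] args)
  {
    System.out.println(shouldAddToTimeline(ServerType.HISTORICAL)); // true
    System.out.println(shouldAddToTimeline(ServerType.BROKER));     // false
  }
}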
Use of org.apache.druid.query.TableDataSource in project druid by druid-io.
In class DruidUnionDataSourceRel, method toDruidQuery:
@Override
public DruidQuery toDruidQuery(final boolean finalizeAggregations)
{
  final List<TableDataSource> dataSources = new ArrayList<>();
  RowSignature signature = null;

  for (final RelNode relNode : unionRel.getInputs()) {
    final DruidRel<?> druidRel = (DruidRel<?>) relNode;
    if (!DruidRels.isScanOrMapping(druidRel, false)) {
      getPlannerContext().setPlanningError(
          "SQL requires union between inputs that are not simple table scans "
          + "and involve a filter or aliasing"
      );
      throw new CannotBuildQueryException(druidRel);
    }

    final DruidQuery query = druidRel.toDruidQuery(false);
    final DataSource dataSource = query.getDataSource();
    if (!(dataSource instanceof TableDataSource)) {
      getPlannerContext().setPlanningError(
          "SQL requires union with input of '%s' type that is not supported."
          + " Union operation is only supported between regular tables. ",
          dataSource.getClass().getSimpleName()
      );
      throw new CannotBuildQueryException(druidRel);
    }

    if (signature == null) {
      signature = query.getOutputRowSignature();
    }

    if (signature.getColumnNames().equals(query.getOutputRowSignature().getColumnNames())) {
      dataSources.add((TableDataSource) dataSource);
    } else {
      getPlannerContext().setPlanningError(
          "There is a mismatch between the output row signature of input tables and the row signature of union output."
      );
      throw new CannotBuildQueryException(druidRel);
    }
  }

  if (signature == null) {
    // No inputs.
    throw new CannotBuildQueryException(unionRel);
  }

  // Sanity check: the signature's column names must match the union column names registered at
  // creation time.
  if (!signature.getColumnNames().equals(unionColumnNames)) {
    throw new CannotBuildQueryException(unionRel);
  }

  return partialQuery.build(
      new UnionDataSource(dataSources),
      signature,
      getPlannerContext(),
      getCluster().getRexBuilder(),
      finalizeAggregations
  );
}
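When every input passes the checks above, the method builds a query over a UnionDataSource whose members are plain TableDataSources with matching column names. A minimal sketch of that target shape, assuming druid-processing is on the classpath (the table names are illustrative):

import com.google.common.collect.ImmutableList;
import org.apache.druid.query.TableDataSource;
import org.apache.druid.query.UnionDataSource;

public class UnionDataSourceSketch
{
  public static void main(String[] args)
  {
    // Only plain tables may be union members; joins, lookups, and subqueries
    // are rejected with a planning error in toDruidQuery above.
    UnionDataSource union = new UnionDataSource(ImmutableList.of(
        new TableDataSource("wiki_2022"),
        new TableDataSource("wiki_2023")
    ));
    System.out.println(union.getTableNames());
  }
}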