Use of org.apache.druid.query.TableDataSource in project druid by druid-io.
From the class DataSourcesResourceTest, method testIsHandOffComplete.
@Test
public void testIsHandOffComplete() {
  MetadataRuleManager databaseRuleManager = EasyMock.createMock(MetadataRuleManager.class);
  Rule loadRule = new IntervalLoadRule(Intervals.of("2013-01-02T00:00:00Z/2013-01-03T00:00:00Z"), null);
  Rule dropRule = new IntervalDropRule(Intervals.of("2013-01-01T00:00:00Z/2013-01-02T00:00:00Z"));
  DataSourcesResource dataSourcesResource = new DataSourcesResource(inventoryView, null, databaseRuleManager, null, null, null);
  // test dropped
  EasyMock.expect(databaseRuleManager.getRulesWithDefault("dataSource1")).andReturn(ImmutableList.of(loadRule, dropRule)).once();
  EasyMock.replay(databaseRuleManager);
  String interval1 = "2013-01-01T01:00:00Z/2013-01-01T02:00:00Z";
  Response response1 = dataSourcesResource.isHandOffComplete("dataSource1", interval1, 1, "v1");
  Assert.assertTrue((boolean) response1.getEntity());
  EasyMock.verify(databaseRuleManager);
  // test isn't dropped and no timeline found
  EasyMock.reset(databaseRuleManager);
  EasyMock.expect(databaseRuleManager.getRulesWithDefault("dataSource1")).andReturn(ImmutableList.of(loadRule, dropRule)).once();
  EasyMock.expect(inventoryView.getTimeline(new TableDataSource("dataSource1"))).andReturn(null).once();
  EasyMock.replay(inventoryView, databaseRuleManager);
  String interval2 = "2013-01-02T01:00:00Z/2013-01-02T02:00:00Z";
  Response response2 = dataSourcesResource.isHandOffComplete("dataSource1", interval2, 1, "v1");
  Assert.assertFalse((boolean) response2.getEntity());
  EasyMock.verify(inventoryView, databaseRuleManager);
  // test isn't dropped and timeline exists
  String interval3 = "2013-01-02T02:00:00Z/2013-01-02T03:00:00Z";
  SegmentLoadInfo segmentLoadInfo = new SegmentLoadInfo(createSegment(Intervals.of(interval3), "v1", 1));
  segmentLoadInfo.addServer(createHistoricalServerMetadata("test"));
  VersionedIntervalTimeline<String, SegmentLoadInfo> timeline = new VersionedIntervalTimeline<String, SegmentLoadInfo>(null) {
    @Override
    public List<TimelineObjectHolder<String, SegmentLoadInfo>> lookupWithIncompletePartitions(Interval interval) {
      PartitionHolder<SegmentLoadInfo> partitionHolder = new PartitionHolder<>(new NumberedPartitionChunk<>(1, 1, segmentLoadInfo));
      List<TimelineObjectHolder<String, SegmentLoadInfo>> ret = new ArrayList<>();
      ret.add(new TimelineObjectHolder<>(Intervals.of(interval3), "v1", partitionHolder));
      return ret;
    }
  };
  EasyMock.reset(inventoryView, databaseRuleManager);
  EasyMock.expect(databaseRuleManager.getRulesWithDefault("dataSource1")).andReturn(ImmutableList.of(loadRule, dropRule)).once();
  EasyMock.expect(inventoryView.getTimeline(new TableDataSource("dataSource1"))).andReturn(timeline).once();
  EasyMock.replay(inventoryView, databaseRuleManager);
  Response response3 = dataSourcesResource.isHandOffComplete("dataSource1", interval3, 1, "v1");
  Assert.assertTrue((boolean) response3.getEntity());
  EasyMock.verify(inventoryView, databaseRuleManager);
}
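These expectations only match because TableDataSource behaves as a simple value object: EasyMock's default argument matching uses equals(), and two TableDataSource instances built from the same name compare equal. A minimal sketch of that property, assuming nothing beyond the public constructor and getName():

TableDataSource a = new TableDataSource("dataSource1");
TableDataSource b = new TableDataSource("dataSource1");
// Equal by name, so an expectation recorded with one instance matches a lookup made with another.
Assert.assertEquals(a, b);
Assert.assertEquals(a.hashCode(), b.hashCode());
Assert.assertEquals("dataSource1", a.getName());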
Use of org.apache.druid.query.TableDataSource in project druid by druid-io.
From the class DruidQueryTest, method test_filtration_intervalInQueryFilter.
@Test
public void test_filtration_intervalInQueryFilter() {
  DataSource dataSource = new TableDataSource("test");
  Pair<DataSource, Filtration> pair = DruidQuery.getFiltration(dataSource, filterWithInterval, VirtualColumnRegistry.create(RowSignature.empty(), TestExprMacroTable.INSTANCE));
  verify(pair, dataSource, selectorFilter, Intervals.utc(100, 200));
}
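The fields selectorFilter and filterWithInterval are declared elsewhere in DruidQueryTest and are not shown in these snippets. A plausible sketch of them, assuming filterWithInterval simply ANDs the selector filter with a numeric bound on __time covering milliseconds 100 (inclusive) to 200 (exclusive), which would line up with the Intervals.utc(100, 200) expectation above:

DimFilter selectorFilter = new SelectorDimFilter("dim2", "0", null);
// AND of the plain selector and a __time bound; the bound is the part that
// DruidQuery.getFiltration() can extract into the query's interval list.
DimFilter filterWithInterval = new AndDimFilter(
    ImmutableList.of(
        selectorFilter,
        new BoundDimFilter("__time", "100", "200", false, true, null, null, StringComparators.NUMERIC)
    )
);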
Use of org.apache.druid.query.TableDataSource in project druid by druid-io.
From the class DruidQueryTest, method test_filtration_noJoinAndInterval.
@Test
public void test_filtration_noJoinAndInterval() {
  DataSource dataSource = new TableDataSource("test");
  Pair<DataSource, Filtration> pair = DruidQuery.getFiltration(dataSource, selectorFilter, VirtualColumnRegistry.create(RowSignature.empty(), TestExprMacroTable.INSTANCE));
  verify(pair, dataSource, selectorFilter, Intervals.ETERNITY);
}
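The verify helper used by both tests above is also not part of these snippets. A plausible sketch, assuming Filtration exposes getDimFilter() and getIntervals() and that the Pair carries the data source in lhs and the Filtration in rhs:

private void verify(
    Pair<DataSource, Filtration> pair,
    DataSource expectedDataSource,
    DimFilter expectedFilter,
    Interval expectedInterval
) {
  // The data source passes through unchanged, the interval condition is pulled out into the
  // Filtration's interval list, and the remainder of the filter survives as a DimFilter.
  Assert.assertEquals(expectedDataSource, pair.lhs);
  Assert.assertEquals(expectedFilter, pair.rhs.getDimFilter());
  Assert.assertEquals(ImmutableList.of(expectedInterval), pair.rhs.getIntervals());
}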
Use of org.apache.druid.query.TableDataSource in project druid by druid-io.
From the class DruidSchemaConcurrencyTest, method testDruidSchemaRefreshAndInventoryViewAddSegmentAndBrokerServerViewGetTimeline.
/**
 * This tests the contention between three components: DruidSchema, InventoryView, and BrokerServerView.
 * It first triggers a refresh of DruidSchema. To mimic some heavy work done while holding {@link DruidSchema#lock},
 * {@link DruidSchema#buildDruidTable} is overridden to sleep before doing the real work. While DruidSchema is refreshing,
 * more new segments are added to InventoryView, which triggers updates of BrokerServerView. Finally, while
 * BrokerServerView is being updated, {@link BrokerServerView#getTimeline} is called continuously to mimic user query
 * processing. All these calls must return without heavy contention.
 */
@Test(timeout = 30000L)
public void testDruidSchemaRefreshAndInventoryViewAddSegmentAndBrokerServerViewGetTimeline() throws InterruptedException, ExecutionException, TimeoutException {
  schema = new DruidSchema(CalciteTests.createMockQueryLifecycleFactory(walker, conglomerate), serverView, segmentManager, new MapJoinableFactory(ImmutableSet.of(), ImmutableMap.of()), PLANNER_CONFIG_DEFAULT, new NoopEscalator(), new BrokerInternalQueryConfig(), null) {
    @Override
    DruidTable buildDruidTable(final String dataSource) {
      doInLock(() -> {
        try {
          // Mimic some heavy work done in lock in DruidSchema
          Thread.sleep(5000);
        } catch (InterruptedException e) {
          throw new RuntimeException(e);
        }
      });
      return super.buildDruidTable(dataSource);
    }
  };
  int numExistingSegments = 100;
  int numServers = 19;
  CountDownLatch segmentLoadLatch = new CountDownLatch(numExistingSegments);
  serverView.registerTimelineCallback(Execs.directExecutor(), new TimelineCallback() {
    @Override
    public CallbackAction timelineInitialized() {
      return CallbackAction.CONTINUE;
    }
    @Override
    public CallbackAction segmentAdded(DruidServerMetadata server, DataSegment segment) {
      segmentLoadLatch.countDown();
      return CallbackAction.CONTINUE;
    }
    @Override
    public CallbackAction segmentRemoved(DataSegment segment) {
      return CallbackAction.CONTINUE;
    }
    @Override
    public CallbackAction serverSegmentRemoved(DruidServerMetadata server, DataSegment segment) {
      return CallbackAction.CONTINUE;
    }
  });
  addSegmentsToCluster(0, numServers, numExistingSegments);
  // Wait for all segments to be loaded in BrokerServerView
  Assert.assertTrue(segmentLoadLatch.await(5, TimeUnit.SECONDS));
  // Trigger refresh of DruidSchema. This will internally run the heavy work mimicked by the overridden buildDruidTable
  Future refreshFuture = exec.submit(() -> {
    schema.refresh(walker.getSegments().stream().map(DataSegment::getId).collect(Collectors.toSet()), Sets.newHashSet(DATASOURCE));
    return null;
  });
  // Trigger updates of BrokerServerView. This should be done asynchronously.
  // add completely new segments
  addSegmentsToCluster(numExistingSegments, numServers, 50);
  // add replicas of the first 30 segments.
  addReplicasToCluster(1, numServers, 30);
  // for the first 30 segments, we will still have replicas.
  // for the other 20 segments, they will be completely removed from the cluster.
  removeSegmentsFromCluster(numServers, 50);
  Assert.assertFalse(refreshFuture.isDone());
  for (int i = 0; i < 1000; i++) {
    boolean hasTimeline = exec.submit(() -> serverView.getTimeline(DataSourceAnalysis.forDataSource(new TableDataSource(DATASOURCE))).isPresent()).get(100, TimeUnit.MILLISECONDS);
    Assert.assertTrue(hasTimeline);
    // We want to call getTimeline while BrokerServerView is being updated. Sleep might help with timing.
    Thread.sleep(2);
  }
  refreshFuture.get(10, TimeUnit.SECONDS);
}
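Outside of a stress loop, the same timeline lookup is a short call chain. A minimal sketch of the pattern exercised above, assuming getTimeline returns an Optional timeline for the analysis of a table data source:

DataSourceAnalysis analysis = DataSourceAnalysis.forDataSource(new TableDataSource(DATASOURCE));
serverView.getTimeline(analysis).ifPresent(
    // Look up the timeline holders overlapping a given interval, as query processing would.
    timeline -> timeline.lookup(Intervals.of("2012-01-01/2013-01-01"))
);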
Use of org.apache.druid.query.TableDataSource in project druid by druid-io.
From the class DruidSchemaTest, method testLocalSegmentCacheSetsDataSourceAsBroadcastButNotJoinable.
@Test
public void testLocalSegmentCacheSetsDataSourceAsBroadcastButNotJoinable() throws InterruptedException {
  DruidTable fooTable = (DruidTable) schema.getTableMap().get("foo");
  Assert.assertNotNull(fooTable);
  Assert.assertTrue(fooTable.getDataSource() instanceof TableDataSource);
  Assert.assertFalse(fooTable.getDataSource() instanceof GlobalTableDataSource);
  Assert.assertFalse(fooTable.isJoinable());
  Assert.assertFalse(fooTable.isBroadcast());
  // wait for build twice
  Assert.assertTrue(buildTableLatch.await(1, TimeUnit.SECONDS));
  buildTableLatch = new CountDownLatch(1);
  final DataSegment someNewBrokerSegment = new DataSegment("foo", Intervals.of("2012/2013"), "version1", null, ImmutableList.of("dim1", "dim2"), ImmutableList.of("met1", "met2"), new NumberedShardSpec(2, 3), null, 1, 100L, PruneSpecsHolder.DEFAULT);
  segmentDataSourceNames.add("foo");
  serverView.addSegment(someNewBrokerSegment, ServerType.BROKER);
  Assert.assertTrue(markDataSourceLatch.await(2, TimeUnit.SECONDS));
  Assert.assertTrue(buildTableLatch.await(2, TimeUnit.SECONDS));
  // wait for get again, just to make sure the table has been updated (the latch counts down just before tables are updated)
  Assert.assertTrue(getDatasourcesLatch.await(2, TimeUnit.SECONDS));
  fooTable = (DruidTable) schema.getTableMap().get("foo");
  Assert.assertNotNull(fooTable);
  Assert.assertTrue(fooTable.getDataSource() instanceof TableDataSource);
  // should not be a GlobalTableDataSource for now, because isGlobal is coupled with joinability; ideally this will be
  // changed in the future, and then we should expect a GlobalTableDataSource here
  Assert.assertFalse(fooTable.getDataSource() instanceof GlobalTableDataSource);
  Assert.assertTrue(fooTable.isBroadcast());
  Assert.assertFalse(fooTable.isJoinable());
  // now remove it
  markDataSourceLatch = new CountDownLatch(1);
  buildTableLatch = new CountDownLatch(1);
  getDatasourcesLatch = new CountDownLatch(1);
  segmentDataSourceNames.remove("foo");
  serverView.removeSegment(someNewBrokerSegment, ServerType.BROKER);
  Assert.assertTrue(markDataSourceLatch.await(2, TimeUnit.SECONDS));
  // wait for build
  Assert.assertTrue(buildTableLatch.await(2, TimeUnit.SECONDS));
  // wait for get again, just to make sure the table has been updated (the latch counts down just before tables are updated)
  Assert.assertTrue(getDatasourcesLatch.await(2, TimeUnit.SECONDS));
  fooTable = (DruidTable) schema.getTableMap().get("foo");
  Assert.assertNotNull(fooTable);
  Assert.assertTrue(fooTable.getDataSource() instanceof TableDataSource);
  Assert.assertFalse(fooTable.getDataSource() instanceof GlobalTableDataSource);
  Assert.assertFalse(fooTable.isBroadcast());
  Assert.assertFalse(fooTable.isJoinable());
}
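The broadcast-but-not-joinable assertions above rest on the type hierarchy: GlobalTableDataSource extends TableDataSource and reports itself as global, while a plain TableDataSource does not. A minimal sketch of that relationship:

DataSource plain = new TableDataSource("foo");
DataSource broadcast = new GlobalTableDataSource("foo");
// Both are TableDataSources, but only the global variant answers true to isGlobal().
Assert.assertTrue(plain instanceof TableDataSource);
Assert.assertTrue(broadcast instanceof TableDataSource);
Assert.assertFalse(plain.isGlobal());
Assert.assertTrue(broadcast.isGlobal());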