Use of org.apache.druid.query.DataSource in project druid by druid-io.
From the class DruidQueryTest, the method test_filtration_joinDataSource_intervalInBaseTableFilter_left:
@Test
public void test_filtration_joinDataSource_intervalInBaseTableFilter_left()
{
  DataSource dataSource = join(JoinType.LEFT, filterWithInterval);
  DataSource expectedDataSource = join(JoinType.LEFT, selectorFilter);
  Pair<DataSource, Filtration> pair = DruidQuery.getFiltration(
      dataSource,
      otherFilter,
      VirtualColumnRegistry.create(RowSignature.empty(), TestExprMacroTable.INSTANCE)
  );
  verify(pair, expectedDataSource, otherFilter, Intervals.utc(100, 200));
}
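The test checks that filtration pulls the time-interval condition out of the join's base-table filter (filterWithInterval becomes selectorFilter plus Intervals.utc(100, 200)). The following is a minimal, self-contained sketch of that splitting idea only; TimeBound and extractTimeBounds are hypothetical stand-ins, not Druid's actual classes or the real logic of DruidQuery.getFiltration.

import java.util.ArrayList;
import java.util.Arrays;
import java.util.Iterator;
import java.util.List;

class FiltrationSketch
{
  // Hypothetical stand-in for a __time bound inside a filter.
  static class TimeBound
  {
    final long startMillis;
    final long endMillis;

    TimeBound(long startMillis, long endMillis)
    {
      this.startMillis = startMillis;
      this.endMillis = endMillis;
    }

    @Override
    public String toString()
    {
      return "utc(" + startMillis + ", " + endMillis + ")";
    }
  }

  // Removes time bounds from a mutable list of AND-ed filter conditions and returns
  // them; whatever remains in the list is the residual filter. This is the essence
  // of what the test verifies happens to the join's base-table filter.
  static List<TimeBound> extractTimeBounds(List<Object> conjuncts)
  {
    List<TimeBound> intervals = new ArrayList<>();
    Iterator<Object> it = conjuncts.iterator();
    while (it.hasNext()) {
      Object c = it.next();
      if (c instanceof TimeBound) {
        intervals.add((TimeBound) c);
        it.remove();
      }
    }
    return intervals;
  }

  public static void main(String[] args)
  {
    // Analogue of filterWithInterval collapsing to selectorFilter + Intervals.utc(100, 200).
    List<Object> conjuncts = new ArrayList<>(Arrays.asList(new TimeBound(100, 200), "dim = 'a'"));
    System.out.println(extractTimeBounds(conjuncts)); // [utc(100, 200)]
    System.out.println(conjuncts);                    // [dim = 'a']
  }
}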
Use of org.apache.druid.query.DataSource in project druid by druid-io.
From the class ServerManagerTest, the method testGetQueryRunnerForSegmentsForUnknownQueryThrowingException:
@Test
public void testGetQueryRunnerForSegmentsForUnknownQueryThrowingException()
{
  final Interval interval = Intervals.of("P1d/2011-04-01");
  final List<SegmentDescriptor> descriptors = Collections.singletonList(new SegmentDescriptor(interval, "1", 0));
  expectedException.expect(QueryUnsupportedException.class);
  expectedException.expectMessage("Unknown query type");
  serverManager.getQueryRunnerForSegments(
      new BaseQuery<Object>(new TableDataSource("test"), new MultipleSpecificSegmentSpec(descriptors), false, new HashMap<>())
      {
        @Override
        public boolean hasFilters()
        {
          return false;
        }

        @Override
        public DimFilter getFilter()
        {
          return null;
        }

        @Override
        public String getType()
        {
          return null;
        }

        @Override
        public Query<Object> withOverriddenContext(Map<String, Object> contextOverride)
        {
          return null;
        }

        @Override
        public Query<Object> withQuerySegmentSpec(QuerySegmentSpec spec)
        {
          return null;
        }

        @Override
        public Query<Object> withDataSource(DataSource dataSource)
        {
          return null;
        }
      },
      descriptors
  );
}
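The anonymous BaseQuery subclass is a query type no factory knows about, so the server rejects it. As a rough sketch of that dispatch pattern (the names and types below are illustrative only; Druid's real lookup goes through a QueryRunnerFactoryConglomerate and throws QueryUnsupportedException):

import java.util.HashMap;
import java.util.Map;

class QueryDispatchSketch
{
  static Object findFactory(Map<Class<?>, Object> conglomerate, Object query)
  {
    Object factory = conglomerate.get(query.getClass());
    if (factory == null) {
      // Analogue of the "Unknown query type" error the test expects for its
      // anonymous BaseQuery subclass, which no registered factory handles.
      throw new UnsupportedOperationException("Unknown query type, " + query.getClass().getName());
    }
    return factory;
  }

  public static void main(String[] args)
  {
    try {
      findFactory(new HashMap<>(), new Object()); // nothing registered for Object
    } catch (UnsupportedOperationException e) {
      System.out.println(e.getMessage());
    }
  }
}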
Use of org.apache.druid.query.DataSource in project druid by druid-io.
From the class SegmentManagerBroadcastJoinIndexedTableTest, the method testLoadMultipleIndexedTable:
@Test
public void testLoadMultipleIndexedTable() throws IOException, SegmentLoadingException
{
  final DataSource dataSource = new GlobalTableDataSource(TABLE_NAME);
  Assert.assertFalse(joinableFactory.isDirectlyJoinable(dataSource));
  final String version = DateTimes.nowUtc().toString();
  final String version2 = DateTimes.nowUtc().plus(1000L).toString();
  final String interval = "2011-01-12T00:00:00.000Z/2011-05-01T00:00:00.000Z";
  final String interval2 = "2011-01-12T00:00:00.000Z/2011-03-28T00:00:00.000Z";
  IncrementalIndex data = TestIndex.makeRealtimeIndex("druid.sample.numeric.tsv.bottom");
  IncrementalIndex data2 = TestIndex.makeRealtimeIndex("druid.sample.numeric.tsv.top");
  Assert.assertTrue(segmentManager.loadSegment(createSegment(data, interval, version), false, SegmentLazyLoadFailCallback.NOOP));
  Assert.assertTrue(joinableFactory.isDirectlyJoinable(dataSource));
  Optional<Joinable> maybeJoinable = makeJoinable(dataSource);
  Assert.assertTrue(maybeJoinable.isPresent());
  Joinable joinable = maybeJoinable.get();
  // cardinality is currently tied to the number of rows
  Assert.assertEquals(733, joinable.getCardinality("market"));
  Assert.assertEquals(733, joinable.getCardinality("placement"));
  Assert.assertEquals(
      Optional.of(ImmutableSet.of("preferred")),
      joinable.getCorrelatedColumnValues("market", "spot", "placement", Long.MAX_VALUE, false)
  );
  // add another segment with a smaller interval; it only partially overshadows the first,
  // so there will be 2 segments in the timeline
  Assert.assertTrue(segmentManager.loadSegment(createSegment(data2, interval2, version2), false, SegmentLazyLoadFailCallback.NOOP));
  expectedException.expect(ISE.class);
  expectedException.expectMessage(StringUtils.format("Currently only single segment datasources are supported for broadcast joins, dataSource[%s] has multiple segments. Reingest the data so that it is entirely contained within a single segment to use in JOIN queries.", TABLE_NAME));
  // this will explode because the datasource has multiple segments, which is an invalid state
  // for the joinable factory
  makeJoinable(dataSource);
}
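The invariant being exercised is that a broadcast-join indexed table must be backed by exactly one segment. A minimal sketch of that guard, assuming hypothetical stand-in types (Druid enforces this inside its broadcast-table joinable factory, not with this exact code):

import java.util.Arrays;
import java.util.List;

class BroadcastSegmentGuardSketch
{
  static String requireSingleSegment(String dataSource, List<String> segmentIds)
  {
    if (segmentIds.size() != 1) {
      // Analogue of the ISE the test expects once a second, partially
      // overshadowing segment is loaded for the same broadcast table.
      throw new IllegalStateException(
          "Currently only single segment datasources are supported for broadcast joins, dataSource["
          + dataSource + "] has multiple segments."
      );
    }
    return segmentIds.get(0);
  }

  public static void main(String[] args)
  {
    System.out.println(requireSingleSegment("test", Arrays.asList("seg1"))); // ok: seg1
    requireSingleSegment("test", Arrays.asList("seg1", "seg2"));             // throws
  }
}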
Use of org.apache.druid.query.DataSource in project druid by druid-io.
From the class SegmentManagerBroadcastJoinIndexedTableTest, the method emptyCacheKeyForUnsupportedCondition:
@Test
public void emptyCacheKeyForUnsupportedCondition()
{
  final DataSource dataSource = new GlobalTableDataSource(TABLE_NAME);
  JoinConditionAnalysis condition = EasyMock.mock(JoinConditionAnalysis.class);
  EasyMock.expect(condition.canHashJoin()).andReturn(false);
  EasyMock.replay(condition);
  Assert.assertNull(joinableFactory.build(dataSource, condition).orElse(null));
}
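The contract under test: when the join condition cannot be executed as a hash join, the factory builds no joinable. A sketch of that shape with hypothetical stand-in types (this is the pattern, not Druid's implementation):

import java.util.Optional;

class JoinableBuildSketch
{
  interface Condition
  {
    boolean canHashJoin();
  }

  static Optional<Object> build(Condition condition)
  {
    if (!condition.canHashJoin()) {
      // No joinable (and hence no cache key) for a non-hash-joinable condition,
      // matching the assertNull in the test above.
      return Optional.empty();
    }
    return Optional.of(new Object());
  }

  public static void main(String[] args)
  {
    System.out.println(build(() -> false).orElse(null)); // null
  }
}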
Use of org.apache.druid.query.DataSource in project druid by druid-io.
From the class SegmentManagerBroadcastJoinIndexedTableTest, the method testLoadMultipleIndexedTableOverwrite:
@Test
public void testLoadMultipleIndexedTableOverwrite() throws IOException, SegmentLoadingException
{
  final DataSource dataSource = new GlobalTableDataSource(TABLE_NAME);
  Assert.assertFalse(joinableFactory.isDirectlyJoinable(dataSource));
  // the larger interval overwrites the smaller interval
  final String version = DateTimes.nowUtc().toString();
  final String version2 = DateTimes.nowUtc().plus(1000L).toString();
  final String interval = "2011-01-12T00:00:00.000Z/2011-03-28T00:00:00.000Z";
  final String interval2 = "2011-01-12T00:00:00.000Z/2011-05-01T00:00:00.000Z";
  IncrementalIndex data = TestIndex.makeRealtimeIndex("druid.sample.numeric.tsv.top");
  IncrementalIndex data2 = TestIndex.makeRealtimeIndex("druid.sample.numeric.tsv.bottom");
  DataSegment segment1 = createSegment(data, interval, version);
  DataSegment segment2 = createSegment(data2, interval2, version2);
  Assert.assertTrue(segmentManager.loadSegment(segment1, false, SegmentLazyLoadFailCallback.NOOP));
  Assert.assertTrue(segmentManager.loadSegment(segment2, false, SegmentLazyLoadFailCallback.NOOP));
  Assert.assertTrue(joinableFactory.isDirectlyJoinable(dataSource));
  Optional<Joinable> maybeJoinable = makeJoinable(dataSource);
  Assert.assertTrue(maybeJoinable.isPresent());
  Joinable joinable = maybeJoinable.get();
  // cardinality is currently tied to the number of rows
  Assert.assertEquals(733, joinable.getCardinality("market"));
  Assert.assertEquals(733, joinable.getCardinality("placement"));
  Assert.assertEquals(
      Optional.of(ImmutableSet.of("preferred")),
      joinable.getCorrelatedColumnValues("market", "spot", "placement", Long.MAX_VALUE, false)
  );
  Optional<byte[]> cacheKey = joinableFactory.computeJoinCacheKey(dataSource, JOIN_CONDITION_ANALYSIS);
  Assert.assertTrue(cacheKey.isPresent());
  assertSegmentIdEquals(segment2.getId(), cacheKey.get());
  segmentManager.dropSegment(segment2);
  // if the newer segment is dropped for some reason (which should rarely, if ever,
  // happen), the old table should still exist
  maybeJoinable = makeJoinable(dataSource);
  Assert.assertTrue(maybeJoinable.isPresent());
  joinable = maybeJoinable.get();
  // cardinality is currently tied to the number of rows
  Assert.assertEquals(478, joinable.getCardinality("market"));
  Assert.assertEquals(478, joinable.getCardinality("placement"));
  Assert.assertEquals(
      Optional.of(ImmutableSet.of("preferred")),
      joinable.getCorrelatedColumnValues("market", "spot", "placement", Long.MAX_VALUE, false)
  );
  cacheKey = joinableFactory.computeJoinCacheKey(dataSource, JOIN_CONDITION_ANALYSIS);
  Assert.assertTrue(cacheKey.isPresent());
  assertSegmentIdEquals(segment1.getId(), cacheKey.get());
}
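The test leans on Druid's overshadowing rule: when a newer-versioned segment fully covers an older one's interval, the newer segment backs the table (and the cache key), and dropping it falls the table back to the older segment. A much-reduced sketch of that rule, with hypothetical types and none of the real timeline machinery:

import java.util.Arrays;
import java.util.Comparator;
import java.util.List;
import java.util.Optional;

class OvershadowSketch
{
  static class Segment
  {
    final String id;
    final String version;

    Segment(String id, String version)
    {
      this.id = id;
      this.version = version;
    }
  }

  // Among fully overlapping segments, the highest version wins. Druid versions are
  // timestamps, so lexicographic order matches temporal order here.
  static Optional<Segment> active(List<Segment> loaded)
  {
    return loaded.stream().max(Comparator.comparing((Segment s) -> s.version));
  }

  public static void main(String[] args)
  {
    Segment seg1 = new Segment("segment1", "2011-04-01T00:00:00.000Z");
    Segment seg2 = new Segment("segment2", "2011-04-01T00:00:01.000Z");
    System.out.println(active(Arrays.asList(seg1, seg2)).get().id); // segment2, as in the first cache-key check
    System.out.println(active(Arrays.asList(seg1)).get().id);       // segment1 after segment2 is dropped
  }
}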