Use of org.apache.druid.query.TableDataSource in project druid by druid-io.
From the class DruidSchema, method buildDruidTable.
@VisibleForTesting
DruidTable buildDruidTable(final String dataSource)
{
  ConcurrentSkipListMap<SegmentId, AvailableSegmentMetadata> segmentsMap = segmentMetadataInfo.get(dataSource);
  final Map<String, ColumnType> columnTypes = new TreeMap<>();

  if (segmentsMap != null) {
    for (AvailableSegmentMetadata availableSegmentMetadata : segmentsMap.values()) {
      final RowSignature rowSignature = availableSegmentMetadata.getRowSignature();
      if (rowSignature != null) {
        for (String column : rowSignature.getColumnNames()) {
          // Newer column types should override older ones: segmentsMap is sorted newest-first,
          // so putIfAbsent retains the type from the newest segment that has the column.
          final ColumnType columnType =
              rowSignature.getColumnType(column)
                          .orElseThrow(() -> new ISE("Encountered null type for column[%s]", column));
          columnTypes.putIfAbsent(column, columnType);
        }
      }
    }
  }

  final RowSignature.Builder builder = RowSignature.builder();
  columnTypes.forEach(builder::add);

  final TableDataSource tableDataSource;

  // To be a GlobalTableDataSource instead of a TableDataSource, the datasource must appear on all
  // servers (inferred from its presence in the segment cache, which here belongs to the broker,
  // meaning only broadcast segments live there). To be joinable, it must be joinable according to
  // the factory. We only consider broadcast datasources at this time, and isGlobal is currently
  // strongly coupled with joinable, so only make a GlobalTableDataSource if it is also joinable.
  final GlobalTableDataSource maybeGlobal = new GlobalTableDataSource(dataSource);
  final boolean isJoinable = joinableFactory.isDirectlyJoinable(maybeGlobal);
  final boolean isBroadcast = segmentManager.getDataSourceNames().contains(dataSource);
  if (isBroadcast && isJoinable) {
    tableDataSource = maybeGlobal;
  } else {
    tableDataSource = new TableDataSource(dataSource);
  }

  return new DruidTable(tableDataSource, builder.build(), null, isJoinable, isBroadcast);
}
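The global-versus-regular distinction above comes down to GlobalTableDataSource being a TableDataSource whose isGlobal() returns true, which is what lets the planner treat broadcast datasources as join targets. A minimal standalone sketch of that contract (the datasource names are made up):

import org.apache.druid.query.GlobalTableDataSource;
import org.apache.druid.query.TableDataSource;

public class GlobalTableSketch
{
  public static void main(String[] args)
  {
    // A plain table: queries against it must be fanned out to data servers.
    final TableDataSource regular = new TableDataSource("wikipedia");

    // A global (broadcast) table: every server holds a full copy, so it can
    // sit on the right-hand side of a join without any data movement.
    final TableDataSource broadcast = new GlobalTableDataSource("country_lookup");

    System.out.println(regular.isGlobal());   // false
    System.out.println(broadcast.isGlobal()); // true
  }
}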
Use of org.apache.druid.query.TableDataSource in project druid by druid-io.
From the class DruidUnionDataSourceRule, method getColumnNamesIfTableOrUnion.
static Optional<List<String>> getColumnNamesIfTableOrUnion(
    final DruidRel<?> druidRel,
    @Nullable PlannerContext plannerContext
)
{
  final PartialDruidQuery partialQuery = druidRel.getPartialDruidQuery();
  final Optional<DruidTable> druidTable =
      DruidRels.druidTableIfLeafRel(druidRel)
               .filter(table -> table.getDataSource() instanceof TableDataSource);

  if (druidTable.isPresent() && DruidRels.isScanOrMapping(druidRel, false)) {
    if (partialQuery.stage() == PartialDruidQuery.Stage.SCAN) {
      return Optional.of(druidTable.get().getRowSignature().getColumnNames());
    } else {
      // Sanity check. Expected to be true due to the "scan or mapping" check.
      if (partialQuery.stage() != PartialDruidQuery.Stage.SELECT_PROJECT) {
        throw new ISE("Expected stage %s but got %s", PartialDruidQuery.Stage.SELECT_PROJECT, partialQuery.stage());
      }

      // Apply the mapping (with additional sanity checks).
      final RowSignature tableSignature = druidTable.get().getRowSignature();
      final Mappings.TargetMapping mapping = partialQuery.getSelectProject().getMapping();

      if (mapping.getSourceCount() != tableSignature.size()) {
        throw new ISE("Expected mapping with %d columns but got %d columns", tableSignature.size(), mapping.getSourceCount());
      }

      final List<String> retVal = new ArrayList<>();
      for (int i = 0; i < mapping.getTargetCount(); i++) {
        final int sourceField = mapping.getSourceOpt(i);
        retVal.add(tableSignature.getColumnName(sourceField));
      }
      return Optional.of(retVal);
    }
  } else if (!druidTable.isPresent() && druidRel instanceof DruidUnionDataSourceRel) {
    return Optional.of(((DruidUnionDataSourceRel) druidRel).getUnionColumnNames());
  } else if (druidTable.isPresent()) {
    if (null != plannerContext) {
      plannerContext.setPlanningError(
          "SQL requires union between inputs that are not simple table scans "
          + "and involve a filter or aliasing. Or column types of tables being unioned are not of same type."
      );
    }
    return Optional.empty();
  } else {
    if (null != plannerContext) {
      plannerContext.setPlanningError(
          "SQL requires union with input of a datasource type that is not supported."
          + " Union operation is only supported between regular tables. "
      );
    }
    return Optional.empty();
  }
}
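Stripped of the Calcite types, the SELECT_PROJECT branch only accepts projections that are pure column mappings: each output column is backed by exactly one input column, and the loop renames columns through a target-to-source index table. A plain-Java sketch of that remapping, with invented column names and indices:

import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;

public class ColumnMappingSketch
{
  public static void main(String[] args)
  {
    // The table's columns, in signature order (hypothetical).
    final List<String> tableColumns = Arrays.asList("__time", "s", "cnt");

    // targetToSource[i] is the index of the source column feeding output
    // column i; here the projection keeps (s, cnt) and drops __time.
    final int[] targetToSource = {1, 2};

    // Same loop shape as the SELECT_PROJECT branch above.
    final List<String> projected = new ArrayList<>();
    for (int i = 0; i < targetToSource.length; i++) {
      projected.add(tableColumns.get(targetToSource[i]));
    }

    System.out.println(projected); // [s, cnt]
  }
}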
Use of org.apache.druid.query.TableDataSource in project druid by druid-io.
From the class JoinableFactoryWrapperTest, method test_createSegmentMapFn_usableClause.
@Test
public void test_createSegmentMapFn_usableClause()
{
  final LookupDataSource lookupDataSource = new LookupDataSource("lookyloo");
  final JoinConditionAnalysis conditionAnalysis =
      JoinConditionAnalysis.forExpression("x == \"j.x\"", "j.", ExprMacroTable.nil());
  final PreJoinableClause clause =
      new PreJoinableClause("j.", lookupDataSource, JoinType.LEFT, conditionAnalysis);

  JoinableFactoryWrapper joinableFactoryWrapper = new JoinableFactoryWrapper(
      new JoinableFactory()
      {
        @Override
        public boolean isDirectlyJoinable(DataSource dataSource)
        {
          return dataSource.equals(lookupDataSource);
        }

        @Override
        public Optional<Joinable> build(DataSource dataSource, JoinConditionAnalysis condition)
        {
          if (dataSource.equals(lookupDataSource) && condition.equals(conditionAnalysis)) {
            return Optional.of(LookupJoinable.wrap(new MapLookupExtractor(ImmutableMap.of("k", "v"), false)));
          } else {
            return Optional.empty();
          }
        }
      }
  );

  final Function<SegmentReference, SegmentReference> segmentMapFn = joinableFactoryWrapper.createSegmentMapFn(
      null,
      ImmutableList.of(clause),
      new AtomicLong(),
      new TestQuery(
          new TableDataSource("test"),
          new MultipleIntervalSegmentSpec(ImmutableList.of(Intervals.of("0/100"))),
          false,
          new HashMap<>()
      )
  );

  Assert.assertNotSame(Function.identity(), segmentMapFn);
}
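For context on what the factory above hands back: LookupJoinable.wrap adapts a LookupExtractor into a Joinable over the lookup's fixed key/value column pair. A small sketch, assuming getAvailableColumns() reports the standard "k"/"v" columns (the lookup contents are made up):

import com.google.common.collect.ImmutableMap;
import org.apache.druid.query.extraction.MapLookupExtractor;
import org.apache.druid.segment.join.Joinable;
import org.apache.druid.segment.join.lookup.LookupJoinable;

public class LookupJoinableSketch
{
  public static void main(String[] args)
  {
    // Wrap a simple in-memory lookup, as the test's JoinableFactory does.
    final Joinable joinable = LookupJoinable.wrap(
        new MapLookupExtractor(ImmutableMap.of("k", "v"), false)
    );

    // Lookup joinables expose exactly two columns: the key and the value.
    System.out.println(joinable.getAvailableColumns()); // [k, v]
  }
}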
Use of org.apache.druid.query.TableDataSource in project druid by druid-io.
From the class ClientQuerySegmentWalkerTest, method testJoinOnGroupByOnTable.
@Test
public void testJoinOnGroupByOnTable()
{
  final GroupByQuery subquery = GroupByQuery.builder()
      .setDataSource(FOO)
      .setGranularity(Granularities.ALL)
      .setInterval(Collections.singletonList(INTERVAL))
      .setDimensions(DefaultDimensionSpec.of("s"))
      .setDimFilter(new SelectorDimFilter("s", "y", null))
      .build();

  final GroupByQuery query = (GroupByQuery) GroupByQuery.builder()
      .setDataSource(
          JoinDataSource.create(
              new TableDataSource(FOO),
              new QueryDataSource(subquery),
              "j.",
              "\"j.s\" == \"s\"",
              JoinType.INNER,
              null,
              ExprMacroTable.nil()
          )
      )
      .setGranularity(Granularities.ALL)
      .setInterval(Intervals.ONLY_ETERNITY)
      .setDimensions(DefaultDimensionSpec.of("s"), DefaultDimensionSpec.of("j.s"))
      .setAggregatorSpecs(new CountAggregatorFactory("cnt"))
      .build()
      .withId(DUMMY_QUERY_ID);

  testQuery(
      query,
      ImmutableList.of(
          ExpectedQuery.cluster(subquery.withId(DUMMY_QUERY_ID).withSubQueryId("2.1")),
          ExpectedQuery.cluster(
              query.withDataSource(
                  query.getDataSource().withChildren(
                      ImmutableList.of(
                          query.getDataSource().getChildren().get(0),
                          InlineDataSource.fromIterable(
                              ImmutableList.of(new Object[]{"y"}),
                              RowSignature.builder().add("s", ColumnType.STRING).build()
                          )
                      )
                  )
              )
          )
      ),
      ImmutableList.of(new Object[]{"y", "y", 1L})
  );

  // Note: this should really be 1, but in the interim, queries composed of multiple queries count each
  // invocation of either the cluster or local walker in ClientQuerySegmentWalker.
  Assert.assertEquals(2, scheduler.getTotalRun().get());
  Assert.assertEquals(2, scheduler.getTotalPrioritizedAndLaned().get());
  Assert.assertEquals(2, scheduler.getTotalAcquired().get());
  Assert.assertEquals(2, scheduler.getTotalReleased().get());
}
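The expected rewritten query above shows the broker replacing the QueryDataSource with the subquery's materialized rows before sending the join to the cluster. A sketch of constructing that same inline datasource directly (row contents copied from the test):

import com.google.common.collect.ImmutableList;
import org.apache.druid.query.InlineDataSource;
import org.apache.druid.segment.column.ColumnType;
import org.apache.druid.segment.column.RowSignature;

public class InlineSubqueryResultSketch
{
  public static void main(String[] args)
  {
    // What the broker effectively substitutes for the QueryDataSource once the
    // subquery has run: its result rows plus their row signature.
    final InlineDataSource inlined = InlineDataSource.fromIterable(
        ImmutableList.of(new Object[]{"y"}),
        RowSignature.builder().add("s", ColumnType.STRING).build()
    );

    // The signature carries the single STRING column named "s".
    System.out.println(inlined.getRowSignature());

    // Inline rows travel with the query itself, so the datasource is global.
    System.out.println(inlined.isGlobal()); // true
  }
}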
Use of org.apache.druid.query.TableDataSource in project druid by druid-io.
From the class ClientQuerySegmentWalkerTest, method testJoinOnTableErrorCantInlineTable.
@Test
public void testJoinOnTableErrorCantInlineTable()
{
  final GroupByQuery query = (GroupByQuery) GroupByQuery.builder()
      .setDataSource(
          JoinDataSource.create(
              new TableDataSource(FOO),
              new TableDataSource(BAR),
              "j.",
              "\"j.s\" == \"s\"",
              JoinType.INNER,
              null,
              ExprMacroTable.nil()
          )
      )
      .setGranularity(Granularities.ALL)
      .setInterval(Intervals.ONLY_ETERNITY)
      .setDimensions(DefaultDimensionSpec.of("s"), DefaultDimensionSpec.of("j.s"))
      .setAggregatorSpecs(new CountAggregatorFactory("cnt"))
      .build()
      .withId(DUMMY_QUERY_ID);

  expectedException.expect(IllegalStateException.class);
  expectedException.expectMessage("Cannot handle subquery structure for dataSource");
  testQuery(query, ImmutableList.of(), ImmutableList.of());
}
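The failure comes from the right-hand side being a plain table: unlike the QueryDataSource in the previous test, it cannot be executed and inlined, and (assuming JoinDataSource.isConcrete() returns false, as I read it) the join tree cannot be handed to data servers as-is either, so planning dead-ends with the ISE the test expects. A sketch of the same datasource tree, using hypothetical table names in place of the test's FOO/BAR constants:

import org.apache.druid.math.expr.ExprMacroTable;
import org.apache.druid.query.JoinDataSource;
import org.apache.druid.query.TableDataSource;
import org.apache.druid.segment.join.JoinType;

public class JoinConcretenessSketch
{
  public static void main(String[] args)
  {
    final JoinDataSource join = JoinDataSource.create(
        new TableDataSource("foo"),
        new TableDataSource("bar"), // a plain table on the right: not inlinable
        "j.",
        "\"j.s\" == \"s\"",
        JoinType.INNER,
        null,
        ExprMacroTable.nil()
    );

    // A join datasource is never directly queryable ("concrete"); it must be
    // rewritten first, which is impossible here because neither side is a
    // subquery result or a broadcast/global table.
    System.out.println(join.isConcrete()); // false
  }
}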