Example usage of org.apache.calcite.plan.RelOptCluster in the Apache Calcite project: class VolcanoPlannerTest, method testTransformLeaf.
// ~ Methods ----------------------------------------------------------------
/**
 * Verifies that the Volcano planner converts a leaf relational expression
 * from the NONE calling convention to the PHYS calling convention.
 */
@Test
public void testTransformLeaf() {
    final VolcanoPlanner volcanoPlanner = new VolcanoPlanner();
    volcanoPlanner.addRelTraitDef(ConventionTraitDef.INSTANCE);
    volcanoPlanner.addRule(new PhysLeafRule());

    final RelOptCluster relCluster = newCluster(volcanoPlanner);
    final NoneLeafRel noneLeaf = new NoneLeafRel(relCluster, "a");

    // Request an equivalent expression carrying the PHYS convention trait.
    final RelNode withPhysTraits =
        volcanoPlanner.changeTraits(noneLeaf, relCluster.traitSetOf(PHYS_CALLING_CONVENTION));
    volcanoPlanner.setRoot(withPhysTraits);

    final RelNode best = volcanoPlanner.chooseDelegate().findBestExp();
    assertTrue(best instanceof PhysLeafRel);
}
Example usage of org.apache.calcite.plan.RelOptCluster in the Apache Calcite project: class DruidTable, method toRel.
/**
 * Converts this Druid table to a relational expression: a logical table scan
 * wrapped in a {@code DruidQuery} that uses the Bindable calling convention.
 */
public RelNode toRel(RelOptTable.ToRelContext context, RelOptTable relOptTable) {
    final RelOptCluster cluster = context.getCluster();
    final RelTraitSet bindableTraits = cluster.traitSetOf(BindableConvention.INSTANCE);
    final TableScan tableScan = LogicalTableScan.create(cluster, relOptTable);
    return DruidQuery.create(
        cluster, bindableTraits, relOptTable, this, ImmutableList.<RelNode>of(tableScan));
}
Example usage of org.apache.calcite.plan.RelOptCluster in the Apache Storm project: class StreamsModifyRule, method convert.
/**
 * Converts a logical {@code TableModify} into a Streams-convention insert rel.
 * Only INSERT into a STREAM-typed table is supported; anything else throws.
 */
@Override
public RelNode convert(RelNode rel) {
    final TableModify modify = (TableModify) rel;
    final RelOptCluster cluster = modify.getCluster();
    final RelOptTable relOptTable = modify.getTable();
    final RelTraitSet streamsTraits =
        modify.getTraitSet().replace(StreamsLogicalConvention.INSTANCE);

    // Convert the child to the Streams convention before inspecting the table.
    final RelNode input = modify.getInput();
    final RelNode streamsInput =
        convert(input, input.getTraitSet().replace(StreamsLogicalConvention.INSTANCE));

    // Resolve the primary-key ordinal from whichever Storm table flavor backs
    // this node; fail fast if neither flavor applies.
    final int primaryKey;
    final StormTable stormTable = relOptTable.unwrap(StormTable.class);
    if (stormTable != null) {
        primaryKey = stormTable.primaryKey();
    } else {
        final StormStreamableTable streamableTable =
            relOptTable.unwrap(StormStreamableTable.class);
        if (streamableTable == null) {
            throw new IllegalStateException("Table must be able to unwrap with StormTable or StormStreamableTable.");
        }
        primaryKey = streamableTable.primaryKey();
    }

    final Table table = relOptTable.unwrap(Table.class);
    final TableModify.Operation operation = modify.getOperation();
    switch (table.getJdbcTableType()) {
        case STREAM:
            if (operation != TableModify.Operation.INSERT) {
                throw new UnsupportedOperationException(
                    String.format("Stream doesn't support %s modify operation", operation));
            }
            return new StreamsStreamInsertRel(cluster, streamsTraits, relOptTable,
                modify.getCatalogReader(), streamsInput, operation,
                modify.getUpdateColumnList(), modify.getSourceExpressionList(),
                modify.isFlattened(), primaryKey);
        default:
            throw new IllegalArgumentException(
                String.format("Unsupported table type: %s", table.getJdbcTableType()));
    }
}
Example usage of org.apache.calcite.plan.RelOptCluster in the Apache Flink project: class HiveParserBaseSemanticAnalyzer, method genValues.
/**
 * Builds a Values relational node from rows of string-encoded literals.
 *
 * <p>Each inner list of {@code values} is one row; each string is converted to a
 * {@link RexLiteral} according to the corresponding column's Hive type from
 * {@code tmpTable}. Column entries are also registered in {@code rowResolver}
 * under {@code tabAlias}.
 *
 * @param tabAlias    alias under which the generated columns are registered
 * @param tmpTable    temp table whose column types drive the literal conversion
 * @param rowResolver resolver to populate with the generated column info (mutated)
 * @param cluster     cluster providing the RexBuilder and type factory
 * @param values      row-major string values; a null entry becomes a NULL literal
 * @return a Values RelNode whose row type is the least-restrictive type over all rows
 * @throws IllegalArgumentException if a row's length differs from the column count
 */
public static RelNode genValues(String tabAlias, Table tmpTable, HiveParserRowResolver rowResolver, RelOptCluster cluster, List<List<String>> values) {
// Hive type of each column of the temp table, in column order.
List<TypeInfo> tmpTableTypes = tmpTable.getCols().stream().map(f -> TypeInfoUtils.getTypeInfoFromTypeString(f.getType())).collect(Collectors.toList());
RexBuilder rexBuilder = cluster.getRexBuilder();
// calcite types for each field
// NOTE(review): the cast assumes every column type is primitive — TODO confirm
// callers never pass complex (struct/map/list) column types here.
List<RelDataType> calciteTargetTypes = tmpTableTypes.stream().map(ti -> HiveParserTypeConverter.convert((PrimitiveTypeInfo) ti, rexBuilder.getTypeFactory())).collect(Collectors.toList());
// calcite field names
List<String> calciteFieldNames = IntStream.range(0, calciteTargetTypes.size()).mapToObj(SqlUtil::deriveAliasFromOrdinal).collect(Collectors.toList());
// calcite type for each row
List<RelDataType> calciteRowTypes = new ArrayList<>();
List<List<RexLiteral>> rows = new ArrayList<>();
for (List<String> value : values) {
Preconditions.checkArgument(value.size() == tmpTableTypes.size(), String.format("Values table col length (%d) and data length (%d) mismatch", tmpTableTypes.size(), value.size()));
List<RexLiteral> row = new ArrayList<>();
for (int i = 0; i < tmpTableTypes.size(); i++) {
PrimitiveTypeInfo primitiveTypeInfo = (PrimitiveTypeInfo) tmpTableTypes.get(i);
RelDataType calciteType = calciteTargetTypes.get(i);
String col = value.get(i);
if (col == null) {
// Null cell -> typed NULL literal.
row.add(rexBuilder.makeNullLiteral(calciteType));
} else {
// Convert the string cell to a literal matching the column's primitive category.
switch(primitiveTypeInfo.getPrimitiveCategory()) {
case BYTE:
case SHORT:
case INT:
case LONG:
row.add(rexBuilder.makeExactLiteral(new BigDecimal(col), calciteType));
break;
case DECIMAL:
// Out-of-range decimals degrade to NULL instead of failing.
BigDecimal bigDec = new BigDecimal(col);
row.add(SqlTypeUtil.isValidDecimalValue(bigDec, calciteType) ? rexBuilder.makeExactLiteral(bigDec, calciteType) : rexBuilder.makeNullLiteral(calciteType));
break;
case FLOAT:
case DOUBLE:
row.add(rexBuilder.makeApproxLiteral(new BigDecimal(col), calciteType));
break;
case BOOLEAN:
row.add(rexBuilder.makeLiteral(Boolean.parseBoolean(col)));
break;
default:
// Everything else (strings, chars, dates-as-text, ...) becomes a char literal.
row.add(rexBuilder.makeCharLiteral(HiveParserUtils.asUnicodeString(col)));
}
}
}
// Record this row's struct type; per-row types may differ (e.g. NULL cells),
// so the final row type is unified below.
calciteRowTypes.add(rexBuilder.getTypeFactory().createStructType(row.stream().map(RexLiteral::getType).collect(Collectors.toList()), calciteFieldNames));
rows.add(row);
}
// compute the final row type
RelDataType calciteRowType = rexBuilder.getTypeFactory().leastRestrictive(calciteRowTypes);
// Expose every generated column through the row resolver under tabAlias.
for (int i = 0; i < calciteFieldNames.size(); i++) {
ColumnInfo colInfo = new ColumnInfo(calciteFieldNames.get(i), HiveParserTypeConverter.convert(calciteRowType.getFieldList().get(i).getType()), tabAlias, false);
rowResolver.put(tabAlias, calciteFieldNames.get(i), colInfo);
}
return HiveParserUtils.genValuesRelNode(cluster, rexBuilder.getTypeFactory().createStructType(calciteRowType.getFieldList()), rows);
}
Example usage of org.apache.calcite.plan.RelOptCluster in the Apache Flink project: class LogicalSnapshot, method create.
/**
 * Creates a LogicalSnapshot over {@code input} as of the given {@code period}.
 *
 * <p>Collation and distribution traits are derived lazily from the input via
 * the cluster's metadata query.
 */
public static LogicalSnapshot create(RelNode input, RexNode period) {
    final RelOptCluster cluster = input.getCluster();
    final RelMetadataQuery mq = cluster.getMetadataQuery();
    final RelTraitSet snapshotTraits = cluster.traitSet()
        .replace(Convention.NONE)
        .replaceIfs(RelCollationTraitDef.INSTANCE, () -> RelMdCollation.snapshot(mq, input))
        .replaceIf(RelDistributionTraitDef.INSTANCE, () -> RelMdDistribution.snapshot(mq, input));
    return new LogicalSnapshot(cluster, snapshotTraits, input, period);
}
Aggregations