Use of org.apache.beam.vendor.calcite.v1_28_0.org.apache.calcite.plan.RelOptTable in project storm by apache.
The class StreamsModifyRule, method convert.
@Override
public RelNode convert(RelNode rel) {
    final TableModify tableModify = (TableModify) rel;
    final RelNode input = tableModify.getInput();
    final RelOptCluster cluster = tableModify.getCluster();
    final RelTraitSet traitSet = tableModify.getTraitSet().replace(StreamsLogicalConvention.INSTANCE);
    final RelOptTable relOptTable = tableModify.getTable();
    final Prepare.CatalogReader catalogReader = tableModify.getCatalogReader();
    final RelNode convertedInput = convert(input, input.getTraitSet().replace(StreamsLogicalConvention.INSTANCE));
    final TableModify.Operation operation = tableModify.getOperation();
    final List<String> updateColumnList = tableModify.getUpdateColumnList();
    final List<RexNode> sourceExpressionList = tableModify.getSourceExpressionList();
    final boolean flattened = tableModify.isFlattened();
    // Resolve the primary key by unwrapping the table to one of Storm's
    // table implementations; unwrap returns null when the type doesn't match.
    int primaryKey;
    StormTable stormTable = tableModify.getTable().unwrap(StormTable.class);
    if (stormTable != null) {
        primaryKey = stormTable.primaryKey();
    } else {
        StormStreamableTable streamableTable = tableModify.getTable().unwrap(StormStreamableTable.class);
        if (streamableTable != null) {
            primaryKey = streamableTable.primaryKey();
        } else {
            throw new IllegalStateException("Table must be able to unwrap with StormTable or StormStreamableTable.");
        }
    }
    final Table table = tableModify.getTable().unwrap(Table.class);
    switch (table.getJdbcTableType()) {
        case STREAM:
            // Only INSERT into a STREAM table is supported.
            if (operation != TableModify.Operation.INSERT) {
                throw new UnsupportedOperationException(
                        String.format("Stream doesn't support %s modify operation", operation));
            }
            return new StreamsStreamInsertRel(cluster, traitSet, relOptTable, catalogReader,
                    convertedInput, operation, updateColumnList, sourceExpressionList, flattened, primaryKey);
        default:
            throw new IllegalArgumentException(
                    String.format("Unsupported table type: %s", table.getJdbcTableType()));
    }
}
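The nested null checks above rely on a useful property of Calcite's RelOptTable.unwrap(Class): it returns null instead of throwing when the underlying table does not implement the requested type, so a caller can probe several candidate types in order. A minimal sketch of that fallback pattern as a standalone helper (the class name, method name, and error message are illustrative, not part of the Storm code):

import java.util.Arrays;
import org.apache.calcite.plan.RelOptTable;

final class TableUnwrapper {

    private TableUnwrapper() {
    }

    // Probes the table for each candidate class in order and returns the
    // first successful unwrap; unwrap(Class) yields null on a mismatch
    // instead of throwing, which is what makes the fallback chain work.
    @SafeVarargs
    static <T> T unwrapFirst(RelOptTable relOptTable, Class<? extends T>... candidates) {
        for (Class<? extends T> candidate : candidates) {
            T unwrapped = relOptTable.unwrap(candidate);
            if (unwrapped != null) {
                return unwrapped;
            }
        }
        throw new IllegalStateException(
                "Table cannot be unwrapped to any of " + Arrays.toString(candidates));
    }
}

If StormTable and StormStreamableTable shared a common supertype declaring primaryKey(), the lookup above would collapse to a single call such as unwrapFirst(tableModify.getTable(), StormTable.class, StormStreamableTable.class).primaryKey().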
Use of org.apache.beam.vendor.calcite.v1_28_0.org.apache.calcite.plan.RelOptTable in project flink by apache.
The class SqlValidatorImpl, method validateUpdate.
public void validateUpdate(SqlUpdate call) {
    final SqlValidatorNamespace targetNamespace = getNamespace(call);
    validateNamespace(targetNamespace, unknownType);
    final RelOptTable relOptTable = SqlValidatorUtil.getRelOptTable(
            targetNamespace, catalogReader.unwrap(Prepare.CatalogReader.class), null, null);
    final SqlValidatorTable table = relOptTable == null
            ? targetNamespace.getTable()
            : relOptTable.unwrap(SqlValidatorTable.class);
    final RelDataType targetRowType = createTargetRowType(table, call.getTargetColumnList(), true);
    final SqlSelect select = call.getSourceSelect();
    validateSelect(select, targetRowType);
    final RelDataType sourceRowType = getValidatedNodeType(select);
    checkTypeAssignment(scopes.get(select), table, sourceRowType, targetRowType, call);
    checkConstraint(table, call, targetRowType);
    validateAccess(call.getTargetTable(), table, SqlAccessEnum.UPDATE);
}
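For an UPDATE, the true passed to createTargetRowType asks it to append the SET target columns after the table's full row type, matching the source select Calcite generates for an UPDATE (every column plus one expression per SET item). A minimal sketch of driving this validation path end to end through vanilla Calcite's Frameworks API (Flink embeds its own copy of SqlValidatorImpl, but the entry point has the same shape; the hr schema and Emp POJO below are assumptions for illustration):

import org.apache.calcite.adapter.java.ReflectiveSchema;
import org.apache.calcite.schema.SchemaPlus;
import org.apache.calcite.sql.SqlNode;
import org.apache.calcite.tools.FrameworkConfig;
import org.apache.calcite.tools.Frameworks;
import org.apache.calcite.tools.Planner;

public final class ValidateUpdateDemo {

    // ReflectiveSchema maps public array fields to tables and the
    // element type's public fields to columns.
    public static class Emp {
        public final int sal;
        public final int deptno;

        public Emp(int sal, int deptno) {
            this.sal = sal;
            this.deptno = deptno;
        }
    }

    public static class Hr {
        public final Emp[] emps = { new Emp(100, 10) };
    }

    public static void main(String[] args) throws Exception {
        SchemaPlus rootSchema = Frameworks.createRootSchema(true);
        SchemaPlus hr = rootSchema.add("hr", new ReflectiveSchema(new Hr()));
        FrameworkConfig config = Frameworks.newConfigBuilder().defaultSchema(hr).build();
        Planner planner = Frameworks.getPlanner(config);
        // Identifiers are quoted because Calcite's default lex upper-cases
        // unquoted names, while ReflectiveSchema registers them as written.
        SqlNode parsed = planner.parse(
                "UPDATE \"emps\" SET \"sal\" = \"sal\" + 1 WHERE \"deptno\" = 10");
        SqlNode validated = planner.validate(parsed); // exercises validateUpdate
        System.out.println(validated);
    }
}

Note that validation only type-checks the statement; converting it to a relational plan is a separate step with stricter requirements (for instance, a modifiable target table).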
Use of org.apache.beam.vendor.calcite.v1_28_0.org.apache.calcite.plan.RelOptTable in project flink by apache.
The class SqlValidatorImpl, method validateInsert.
public void validateInsert(SqlInsert insert) {
    final SqlValidatorNamespace targetNamespace = getNamespace(insert);
    validateNamespace(targetNamespace, unknownType);
    final RelOptTable relOptTable = SqlValidatorUtil.getRelOptTable(
            targetNamespace, catalogReader.unwrap(Prepare.CatalogReader.class), null, null);
    final SqlValidatorTable table = relOptTable == null
            ? targetNamespace.getTable()
            : relOptTable.unwrap(SqlValidatorTable.class);
    // INSERT has an optional column name list. If present then
    // reduce the rowtype to the columns specified. If not present
    // then the entire target rowtype is used.
    final RelDataType targetRowType = createTargetRowType(table, insert.getTargetColumnList(), false);
    final SqlNode source = insert.getSource();
    if (source instanceof SqlSelect) {
        final SqlSelect sqlSelect = (SqlSelect) source;
        validateSelect(sqlSelect, targetRowType);
    } else {
        final SqlValidatorScope scope = scopes.get(source);
        validateQuery(source, scope, targetRowType);
    }
    // REVIEW jvs 4-Dec-2008: In FRG-365, this namespace row type is
    // discarding the type inferred by inferUnknownTypes (which was invoked
    // from validateSelect above). It would be better if that information
    // were used here so that we never saw any untyped nulls during
    // checkTypeAssignment.
    final RelDataType sourceRowType = getNamespace(source).getRowType();
    final RelDataType logicalTargetRowType = getLogicalTargetRowType(targetRowType, insert);
    setValidatedNodeType(insert, logicalTargetRowType);
    final RelDataType logicalSourceRowType = getLogicalSourceRowType(sourceRowType, insert);
    final List<ColumnStrategy> strategies = table.unwrap(RelOptTable.class).getColumnStrategies();
    final RelDataType realTargetRowType = typeFactory.createStructType(
            logicalTargetRowType.getFieldList().stream()
                    .filter(f -> strategies.get(f.getIndex()).canInsertInto())
                    .collect(Collectors.toList()));
    final RelDataType targetRowTypeToValidate =
            logicalSourceRowType.getFieldCount() == logicalTargetRowType.getFieldCount()
                    ? logicalTargetRowType
                    : realTargetRowType;
    checkFieldCount(insert.getTargetTable(), table, strategies, targetRowTypeToValidate,
            realTargetRowType, source, logicalSourceRowType, logicalTargetRowType);
    checkTypeAssignment(scopes.get(source), table, logicalSourceRowType, targetRowTypeToValidate, insert);
    checkConstraint(table, source, logicalTargetRowType);
    validateAccess(insert.getTargetTable(), table, SqlAccessEnum.INSERT);
    // Refresh the insert row type to keep sync with source.
    setValidatedNodeType(insert, targetRowTypeToValidate);
}
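The interesting step here is the ColumnStrategy filter: a column whose strategy reports canInsertInto() == false (for example a VIRTUAL computed column) is dropped from the target row type, so an INSERT that omits such columns still lines up field counts. Isolated as a helper, the filter looks roughly like this (the method name insertableRowType is hypothetical):

import java.util.List;
import java.util.stream.Collectors;
import org.apache.calcite.rel.type.RelDataType;
import org.apache.calcite.rel.type.RelDataTypeFactory;
import org.apache.calcite.schema.ColumnStrategy;

// Keeps only the fields a user may supply values for: VIRTUAL columns
// (anything whose strategy reports canInsertInto() == false) are dropped
// from the target row type before field counts are compared.
static RelDataType insertableRowType(RelDataTypeFactory typeFactory,
        RelDataType targetRowType, List<ColumnStrategy> strategies) {
    return typeFactory.createStructType(
            targetRowType.getFieldList().stream()
                    .filter(f -> strategies.get(f.getIndex()).canInsertInto())
                    .collect(Collectors.toList()));
}

validateInsert then checks the source against the full logical target type only when the field counts already match, and against this filtered type otherwise.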
Use of org.apache.beam.vendor.calcite.v1_28_0.org.apache.calcite.plan.RelOptTable in project hazelcast by hazelcast.
The class IndexResolver, method createFullIndexScan.
/**
 * Creates an index scan without any filter.
 *
 * @param scan the original scan operator
 * @param index the index to scan
 * @param ascs the collation of the index fields (ascending flag per field)
 * @param nonEmptyCollation whether to reject a full index scan that has no collation
 * @return the index scan, or {@code null}
 */
private static RelNode createFullIndexScan(FullScanLogicalRel scan, MapTableIndex index,
        List<Boolean> ascs, boolean nonEmptyCollation) {
    assert isIndexSupported(index);
    RexNode scanFilter = OptUtils.extractHazelcastTable(scan).getFilter();
    RelTraitSet traitSet = OptUtils.toPhysicalConvention(scan.getTraitSet());
    RelCollation relCollation = buildCollationTrait(scan, index, ascs);
    if (nonEmptyCollation && relCollation.getFieldCollations().isEmpty()) {
        // Don't make a full scan with empty collation
        return null;
    }
    traitSet = OptUtils.traitPlus(traitSet, relCollation);
    HazelcastRelOptTable originalRelTable = (HazelcastRelOptTable) scan.getTable();
    HazelcastTable originalHazelcastTable = OptUtils.extractHazelcastTable(scan);
    RelOptTable newRelTable = createRelTable(
            originalRelTable.getDelegate().getQualifiedName(),
            originalHazelcastTable.withFilter(null),
            scan.getCluster().getTypeFactory());
    return new IndexScanMapPhysicalRel(scan.getCluster(), traitSet, newRelTable,
            index, null, null, scanFilter);
}
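buildCollationTrait is not shown here, but the trait it attaches is a RelCollation built from the index fields and the ascs flags. A hedged sketch of what such a builder can look like with Calcite's collation API (the helper name and the field-ordinal mapping are assumptions; Hazelcast's real implementation may differ):

import java.util.ArrayList;
import java.util.List;
import org.apache.calcite.rel.RelCollation;
import org.apache.calcite.rel.RelCollations;
import org.apache.calcite.rel.RelFieldCollation;

// Builds a collation from parallel lists of field ordinals and sort
// directions: one RelFieldCollation per indexed field, ascending or
// descending according to the matching flag.
static RelCollation collationOf(List<Integer> fieldIndexes, List<Boolean> ascs) {
    List<RelFieldCollation> fieldCollations = new ArrayList<>();
    for (int i = 0; i < fieldIndexes.size(); i++) {
        RelFieldCollation.Direction direction = ascs.get(i)
                ? RelFieldCollation.Direction.ASCENDING
                : RelFieldCollation.Direction.DESCENDING;
        fieldCollations.add(new RelFieldCollation(fieldIndexes.get(i), direction));
    }
    return RelCollations.of(fieldCollations);
}

An empty collation signals that the index imposes no useful ordering, which is exactly what the nonEmptyCollation guard above rejects: a full index scan is only worth costing if it delivers sorted output.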
Use of org.apache.beam.vendor.calcite.v1_28_0.org.apache.calcite.plan.RelOptTable in project storm by apache.
The class TridentModifyRule, method convert.
@Override
public RelNode convert(RelNode rel) {
    final TableModify tableModify = (TableModify) rel;
    final RelNode input = tableModify.getInput();
    final RelOptCluster cluster = tableModify.getCluster();
    final RelTraitSet traitSet = tableModify.getTraitSet().replace(TridentLogicalConvention.INSTANCE);
    final RelOptTable relOptTable = tableModify.getTable();
    final Prepare.CatalogReader catalogReader = tableModify.getCatalogReader();
    final RelNode convertedInput = convert(input, input.getTraitSet().replace(TridentLogicalConvention.INSTANCE));
    final TableModify.Operation operation = tableModify.getOperation();
    final List<String> updateColumnList = tableModify.getUpdateColumnList();
    final List<RexNode> sourceExpressionList = tableModify.getSourceExpressionList();
    final boolean flattened = tableModify.isFlattened();
    final Table table = tableModify.getTable().unwrap(Table.class);
    switch (table.getJdbcTableType()) {
        case STREAM:
            if (operation != TableModify.Operation.INSERT) {
                throw new UnsupportedOperationException(
                        String.format("Streams don't support %s modify operation", operation));
            }
            return new TridentStreamInsertRel(cluster, traitSet, relOptTable, catalogReader,
                    convertedInput, operation, updateColumnList, sourceExpressionList, flattened);
        default:
            throw new IllegalArgumentException(
                    String.format("Unsupported table type: %s", table.getJdbcTableType()));
    }
}
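Both Storm rules are Calcite ConverterRules: the planner fires convert(RelNode) for every rel matching the rule's source class and convention, and registers the returned rel as an equivalent in the target convention. A skeletal sketch of the surrounding rule class (MyModifyRule and its convention are placeholders; the four-argument ConverterRule constructor shown is the older style these rules follow, while newer Calcite versions prefer ConverterRule.Config):

import org.apache.calcite.plan.Convention;
import org.apache.calcite.rel.RelNode;
import org.apache.calcite.rel.convert.ConverterRule;
import org.apache.calcite.rel.core.TableModify;
import org.apache.calcite.rel.logical.LogicalTableModify;

public class MyModifyRule extends ConverterRule {

    // A stand-in target convention; Storm's rules use
    // StreamsLogicalConvention.INSTANCE / TridentLogicalConvention.INSTANCE.
    public static final Convention CONVENTION = new Convention.Impl("MY_LOGICAL", RelNode.class);

    public static final MyModifyRule INSTANCE = new MyModifyRule();

    private MyModifyRule() {
        // Match LogicalTableModify rels that carry no physical convention yet.
        super(LogicalTableModify.class, Convention.NONE, CONVENTION, "MyModifyRule");
    }

    @Override
    public RelNode convert(RelNode rel) {
        final TableModify tableModify = (TableModify) rel;
        // Build and return the physical TableModify here, as in the methods
        // above; returning null instead tells the planner this particular
        // conversion does not apply.
        return null;
    }
}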