Example use of org.apache.flink.table.operations.QueryOperation in the Apache Flink project.
From the class TableEnvironmentImpl, method from.
@Override
public Table from(TableDescriptor descriptor) {
    // Builds an anonymous (unregistered) table directly from the descriptor:
    // resolve the descriptor's catalog table, wrap it as an anonymous context
    // table, and expose it as a scan query operation.
    Preconditions.checkNotNull(descriptor, "Table descriptor must not be null.");
    final ResolvedCatalogTable resolvedTable =
            catalogManager.resolveCatalogTable(descriptor.toCatalogTable());
    final ContextResolvedTable anonymousTable = ContextResolvedTable.anonymous(resolvedTable);
    return createTable(new SourceQueryOperation(anonymousTable));
}
Example use of org.apache.flink.table.operations.QueryOperation in the Apache Flink project.
From the class InConverter, method convert.
@Override
public RexNode convert(CallExpression call, CallExpressionConvertRule.ConvertContext context) {
    // IN needs at least the tested expression plus one right-hand argument.
    checkArgument(call, call.getChildren().size() > 1);
    final Expression rightHandSide = call.getChildren().get(1);
    if (!(rightHandSide instanceof TableReferenceExpression)) {
        // Value-list form: IN (a, b, c) -> Rex IN over the converted operands.
        final List<RexNode> operands = toRexNodes(context, call.getChildren());
        return context.getRelBuilder()
                .getRexBuilder()
                .makeIn(operands.get(0), operands.subList(1, operands.size()));
    }
    // Sub-query form: IN (<table>) -> RexSubQuery built from the referenced
    // query operation's relational plan.
    final QueryOperation subQueryOperation =
            ((TableReferenceExpression) rightHandSide).getQueryOperation();
    final RexNode testedExpression = context.toRexNode(call.getChildren().get(0));
    return RexSubQuery.in(
            ((FlinkRelBuilder) context.getRelBuilder()).queryOperation(subQueryOperation).build(),
            ImmutableList.of(testedExpression));
}
Example use of org.apache.flink.table.operations.QueryOperation in the Apache Flink project.
From the class SqlToOperationConverter, method convertAlterTableCompact.
/**
 * Convert `ALTER TABLE ... COMPACT` operation to {@link ModifyOperation} for Flink's managed
 * table to trigger a compaction batch job.
 */
private ModifyOperation convertAlterTableCompact(ObjectIdentifier tableIdentifier, ContextResolvedTable contextResolvedTable, SqlAlterTableCompact alterTableCompact) {
    final Catalog catalog = catalogManager.getCatalog(tableIdentifier.getCatalogName()).orElse(null);
    final ResolvedCatalogTable resolvedCatalogTable = contextResolvedTable.getResolvedTable();
    // Compaction is only defined for managed tables; reject everything else up front.
    if (!ManagedTableListener.isManagedTable(catalog, resolvedCatalogTable)) {
        throw new ValidationException(String.format("ALTER TABLE COMPACT operation is not supported for non-managed table %s", tableIdentifier));
    }
    // Default: compact the whole table (empty partition spec).
    CatalogPartitionSpec partitionSpec = new CatalogPartitionSpec(Collections.emptyMap());
    final Map<String, String> partitionKVs = alterTableCompact.getPartitionKVs();
    if (partitionKVs != null) {
        // A partition spec was given — every referenced column must be a declared partition key.
        final List<String> partitionKeys = resolvedCatalogTable.getPartitionKeys();
        final Set<String> declaredKeys = new HashSet<>(partitionKeys);
        final String exMsg =
                partitionKeys.isEmpty()
                        ? String.format("Table %s is not partitioned.", tableIdentifier)
                        : String.format(
                                "Available ordered partition columns: [%s]",
                                "'" + String.join("', '", partitionKeys) + "'");
        for (String partitionKey : partitionKVs.keySet()) {
            if (!declaredKeys.contains(partitionKey)) {
                throw new ValidationException(String.format("Partition column '%s' not defined in the table schema. %s", partitionKey, exMsg));
            }
        }
        partitionSpec = new CatalogPartitionSpec(partitionKVs);
    }
    final Map<String, String> compactOptions =
            catalogManager.resolveCompactManagedTableOptions(resolvedCatalogTable, tableIdentifier, partitionSpec);
    // Source and sink are the same table: read the (partition of the) table back
    // through a scan carrying the compact options and write it into itself.
    final QueryOperation scanSource = new SourceQueryOperation(contextResolvedTable, compactOptions);
    return new SinkModifyOperation(contextResolvedTable, scanSource, partitionSpec.getPartitionSpec(), false, compactOptions);
}
Aggregations