use of org.apache.flink.table.catalog.ResolvedCatalogTable in project flink by apache.
the class ContextResolvedTableJsonDeserializer method deserialize.
@Override
public ContextResolvedTable deserialize(JsonParser jsonParser, DeserializationContext ctx)
        throws IOException {
    final CatalogPlanRestore planRestoreOption =
            SerdeContext.get(ctx).getConfiguration().get(PLAN_RESTORE_CATALOG_OBJECTS);
    final CatalogManager catalogManager =
            SerdeContext.get(ctx).getFlinkContext().getCatalogManager();
    final ObjectNode objectNode = jsonParser.readValueAsTree();
    // Deserialize the two fields, if available
    final ObjectIdentifier identifier =
            JsonSerdeUtil.deserializeOptionalField(
                            objectNode, FIELD_NAME_IDENTIFIER, ObjectIdentifier.class,
                            jsonParser.getCodec(), ctx)
                    .orElse(null);
    ResolvedCatalogTable resolvedCatalogTable =
            JsonSerdeUtil.deserializeOptionalField(
                            objectNode, FIELD_NAME_CATALOG_TABLE, ResolvedCatalogTable.class,
                            jsonParser.getCodec(), ctx)
                    .orElse(null);
    if (identifier == null && resolvedCatalogTable == null) {
        throw new ValidationException(
                String.format(
                        "The input JSON is invalid because it contains neither '%s' nor '%s'.",
                        FIELD_NAME_IDENTIFIER, FIELD_NAME_CATALOG_TABLE));
    }
    if (identifier == null) {
        if (isLookupForced(planRestoreOption)) {
            throw missingIdentifier();
        }
        return ContextResolvedTable.anonymous(resolvedCatalogTable);
    }
    Optional<ContextResolvedTable> contextResolvedTableFromCatalog =
            isLookupEnabled(planRestoreOption)
                    ? catalogManager.getTable(identifier)
                    : Optional.empty();
    // If we have a schema from the plan and from the catalog, we need to check they match.
    if (contextResolvedTableFromCatalog.isPresent() && resolvedCatalogTable != null) {
        ResolvedSchema schemaFromPlan = resolvedCatalogTable.getResolvedSchema();
        ResolvedSchema schemaFromCatalog =
                contextResolvedTableFromCatalog.get().getResolvedSchema();
        if (!areResolvedSchemasEqual(schemaFromPlan, schemaFromCatalog)) {
            throw schemaNotMatching(identifier, schemaFromPlan, schemaFromCatalog);
        }
    }
    if (resolvedCatalogTable == null || isLookupForced(planRestoreOption)) {
        if (!isLookupEnabled(planRestoreOption)) {
            throw lookupDisabled(identifier);
        }
        // We use what is stored inside the catalog
        return contextResolvedTableFromCatalog.orElseThrow(
                () -> missingTableFromCatalog(identifier, isLookupForced(planRestoreOption)));
    }
    if (contextResolvedTableFromCatalog.isPresent()) {
        // If no options are serialized in the plan, the table was compiled with
        // CatalogPlanCompilation.SCHEMA, so we just return the catalog lookup result
        if (objectNode.at("/" + FIELD_NAME_CATALOG_TABLE + "/" + OPTIONS).isMissingNode()) {
            return contextResolvedTableFromCatalog.get();
        }
        return contextResolvedTableFromCatalog
                .flatMap(ContextResolvedTable::getCatalog)
                .map(c -> ContextResolvedTable.permanent(identifier, c, resolvedCatalogTable))
                .orElseGet(() -> ContextResolvedTable.temporary(identifier, resolvedCatalogTable));
    }
    return ContextResolvedTable.temporary(identifier, resolvedCatalogTable);
}
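The branching above is driven entirely by the table.plan.restore.catalog-objects option (the CatalogPlanRestore enum). A minimal sketch of how a user would pick the restore behavior before loading a compiled plan; the option values mirror the isLookupForced/isLookupEnabled checks in the deserializer:
import org.apache.flink.table.api.EnvironmentSettings;
import org.apache.flink.table.api.TableEnvironment;

public class PlanRestoreConfigExample {
    public static void main(String[] args) {
        TableEnvironment tableEnv =
                TableEnvironment.create(EnvironmentSettings.inStreamingMode());
        // ALL (default): prefer the metadata serialized in the plan, consulting the
        //   catalog only when the plan lacks it.
        // ALL_ENFORCED: use the plan only; isLookupEnabled(...) returns false.
        // IDENTIFIER: always resolve through the catalog; isLookupForced(...) returns true.
        tableEnv.getConfig().set("table.plan.restore.catalog-objects", "IDENTIFIER");
    }
}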
use of org.apache.flink.table.catalog.ResolvedCatalogTable in project flink by apache.
the class QueryOperationTest method testSummaryString.
@Test
public void testSummaryString() {
    ResolvedSchema schema = ResolvedSchema.physical(
            Collections.singletonList("a"), Collections.singletonList(DataTypes.INT()));
    ProjectQueryOperation tableOperation = new ProjectQueryOperation(
            Collections.singletonList(new FieldReferenceExpression("a", DataTypes.INT(), 0, 0)),
            new SourceQueryOperation(ContextResolvedTable.temporary(
                    ObjectIdentifier.of("cat1", "db1", "tab1"),
                    new ResolvedCatalogTable(
                            CatalogTable.of(Schema.newBuilder().build(), null,
                                    Collections.emptyList(), Collections.emptyMap()),
                            schema))),
            schema);
    SetQueryOperation unionQueryOperation = new SetQueryOperation(
            tableOperation, tableOperation,
            SetQueryOperation.SetQueryOperationType.UNION, true, schema);
    assertEquals(
            "Union: (all: [true])\n"
                    + "    Project: (projections: [a])\n"
                    + "        CatalogTable: (identifier: [cat1.db1.tab1], fields: [a])\n"
                    + "    Project: (projections: [a])\n"
                    + "        CatalogTable: (identifier: [cat1.db1.tab1], fields: [a])",
            unionQueryOperation.asSummaryString());
}
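The same summary tree can be obtained from any Table API query via Table#getQueryOperation; a hypothetical sketch (the table name tab1, column a, and datagen connector are assumptions chosen to match the test):
import static org.apache.flink.table.api.Expressions.$;

import org.apache.flink.table.api.EnvironmentSettings;
import org.apache.flink.table.api.Table;
import org.apache.flink.table.api.TableEnvironment;

public class SummaryStringExample {
    public static void main(String[] args) {
        TableEnvironment tableEnv =
                TableEnvironment.create(EnvironmentSettings.inStreamingMode());
        tableEnv.executeSql("CREATE TABLE tab1 (a INT) WITH ('connector' = 'datagen')");
        Table union = tableEnv.from("tab1").select($("a"))
                .unionAll(tableEnv.from("tab1").select($("a")));
        // Prints an indented operation tree similar to the string asserted above.
        System.out.println(union.getQueryOperation().asSummaryString());
    }
}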
use of org.apache.flink.table.catalog.ResolvedCatalogTable in project flink by apache.
the class AbstractStreamTableEnvironmentImpl method toStreamInternal.
protected <T> DataStream<T> toStreamInternal(
        Table table,
        SchemaTranslator.ProducingResult schemaTranslationResult,
        @Nullable ChangelogMode changelogMode) {
    final CatalogManager catalogManager = getCatalogManager();
    final OperationTreeBuilder operationTreeBuilder = getOperationTreeBuilder();
    final QueryOperation projectOperation =
            schemaTranslationResult
                    .getProjections()
                    .map(projections -> operationTreeBuilder.project(
                            projections.stream()
                                    .map(ApiExpressionUtils::unresolvedRef)
                                    .collect(Collectors.toList()),
                            table.getQueryOperation()))
                    .orElseGet(table::getQueryOperation);
    final ResolvedCatalogTable resolvedCatalogTable =
            catalogManager.resolveCatalogTable(
                    new ExternalCatalogTable(schemaTranslationResult.getSchema()));
    final ExternalModifyOperation modifyOperation =
            new ExternalModifyOperation(
                    ContextResolvedTable.anonymous("datastream_sink", resolvedCatalogTable),
                    projectOperation,
                    changelogMode,
                    schemaTranslationResult
                            .getPhysicalDataType()
                            .orElseGet(() -> resolvedCatalogTable
                                    .getResolvedSchema()
                                    .toPhysicalRowDataType()));
    return toStreamInternal(table, modifyOperation);
}
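This internal path backs the public conversion methods such as StreamTableEnvironment#toDataStream; a minimal caller-side sketch (the query itself is an arbitrary assumption):
import org.apache.flink.streaming.api.datastream.DataStream;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.table.api.Table;
import org.apache.flink.table.api.bridge.java.StreamTableEnvironment;
import org.apache.flink.types.Row;

public class ToDataStreamExample {
    public static void main(String[] args) throws Exception {
        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
        StreamTableEnvironment tableEnv = StreamTableEnvironment.create(env);
        Table table = tableEnv.sqlQuery("SELECT 'hello' AS greeting");
        // Ends up in toStreamInternal, which wraps the sink schema in an
        // anonymous "datastream_sink" ContextResolvedTable.
        DataStream<Row> stream = tableEnv.toDataStream(table);
        stream.print();
        env.execute();
    }
}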
use of org.apache.flink.table.catalog.ResolvedCatalogTable in project flink by apache.
the class AbstractStreamTableEnvironmentImpl method fromStreamInternal.
protected <T> Table fromStreamInternal(
        DataStream<T> dataStream,
        @Nullable Schema schema,
        @Nullable String viewPath,
        ChangelogMode changelogMode) {
    Preconditions.checkNotNull(dataStream, "Data stream must not be null.");
    Preconditions.checkNotNull(changelogMode, "Changelog mode must not be null.");
    if (dataStream.getExecutionEnvironment() != executionEnvironment) {
        throw new ValidationException(
                "The DataStream's StreamExecutionEnvironment must be identical to the one that "
                        + "has been passed to the StreamTableEnvironment during instantiation.");
    }
    final CatalogManager catalogManager = getCatalogManager();
    final OperationTreeBuilder operationTreeBuilder = getOperationTreeBuilder();
    final SchemaTranslator.ConsumingResult schemaTranslationResult =
            SchemaTranslator.createConsumingResult(
                    catalogManager.getDataTypeFactory(), dataStream.getType(), schema);
    final ResolvedCatalogTable resolvedCatalogTable =
            catalogManager.resolveCatalogTable(
                    new ExternalCatalogTable(schemaTranslationResult.getSchema()));
    final ContextResolvedTable contextResolvedTable;
    if (viewPath != null) {
        UnresolvedIdentifier unresolvedIdentifier = getParser().parseIdentifier(viewPath);
        final ObjectIdentifier objectIdentifier =
                catalogManager.qualifyIdentifier(unresolvedIdentifier);
        contextResolvedTable =
                ContextResolvedTable.temporary(objectIdentifier, resolvedCatalogTable);
    } else {
        contextResolvedTable =
                ContextResolvedTable.anonymous("datastream_source", resolvedCatalogTable);
    }
    final QueryOperation scanOperation =
            new ExternalQueryOperation<>(
                    contextResolvedTable,
                    dataStream,
                    schemaTranslationResult.getPhysicalDataType(),
                    schemaTranslationResult.isTopLevelRecord(),
                    changelogMode);
    final List<String> projections = schemaTranslationResult.getProjections();
    if (projections == null) {
        return createTable(scanOperation);
    }
    final QueryOperation projectOperation =
            operationTreeBuilder.project(
                    projections.stream()
                            .map(ApiExpressionUtils::unresolvedRef)
                            .collect(Collectors.toList()),
                    scanOperation);
    return createTable(projectOperation);
}
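A minimal sketch of the two public entry points that reach fromStreamInternal: fromDataStream exercises the anonymous branch (viewPath == null), while createTemporaryView supplies a view path. The stream contents and view name are assumptions:
import org.apache.flink.streaming.api.datastream.DataStream;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.table.api.Table;
import org.apache.flink.table.api.bridge.java.StreamTableEnvironment;

public class FromDataStreamExample {
    public static void main(String[] args) {
        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
        StreamTableEnvironment tableEnv = StreamTableEnvironment.create(env);
        DataStream<String> stream = env.fromElements("alice", "bob");
        // Anonymous path: registered as an anonymous "datastream_source".
        Table table = tableEnv.fromDataStream(stream);
        // Named path: a view path is qualified and a temporary table is created.
        tableEnv.createTemporaryView("people", stream);
        table.printSchema();
    }
}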
use of org.apache.flink.table.catalog.ResolvedCatalogTable in project flink by apache.
the class TableEnvironmentImpl method from.
@Override
public Table from(TableDescriptor descriptor) {
    Preconditions.checkNotNull(descriptor, "Table descriptor must not be null.");
    final ResolvedCatalogTable resolvedCatalogBaseTable =
            catalogManager.resolveCatalogTable(descriptor.toCatalogTable());
    final QueryOperation queryOperation =
            new SourceQueryOperation(ContextResolvedTable.anonymous(resolvedCatalogBaseTable));
    return createTable(queryOperation);
}
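A minimal caller-side sketch of from(TableDescriptor); the datagen connector, column name, and row count are assumptions:
import org.apache.flink.table.api.DataTypes;
import org.apache.flink.table.api.EnvironmentSettings;
import org.apache.flink.table.api.Schema;
import org.apache.flink.table.api.Table;
import org.apache.flink.table.api.TableDescriptor;
import org.apache.flink.table.api.TableEnvironment;

public class FromDescriptorExample {
    public static void main(String[] args) {
        TableEnvironment tableEnv =
                TableEnvironment.create(EnvironmentSettings.inStreamingMode());
        // The descriptor is resolved into a ResolvedCatalogTable and wrapped in an
        // anonymous ContextResolvedTable, as in from(TableDescriptor) above.
        Table table = tableEnv.from(
                TableDescriptor.forConnector("datagen")
                        .schema(Schema.newBuilder()
                                .column("f0", DataTypes.STRING())
                                .build())
                        .option("number-of-rows", "5")
                        .build());
        table.execute().print();
    }
}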