Use of org.apache.flink.table.catalog.ObjectIdentifier in project flink by splunk.
The class RexNodeJsonDeserializer, method deserializeFunctionClass:
private static SqlOperator deserializeFunctionClass(JsonNode jsonNode, SerdeContext serdeContext) {
    final String className = jsonNode.required(FIELD_NAME_CLASS).asText();
    final Class<?> functionClass = loadClass(className, serdeContext, "function");
    final UserDefinedFunction functionInstance =
            UserDefinedFunctionHelper.instantiateFunction(functionClass);
    final ContextResolvedFunction resolvedFunction;
    // This can never be a system function, because we never serialize classes for system functions.
    if (jsonNode.has(FIELD_NAME_CATALOG_NAME)) {
        final ObjectIdentifier objectIdentifier =
                ObjectIdentifierJsonDeserializer.deserialize(
                        jsonNode.required(FIELD_NAME_CATALOG_NAME).asText(), serdeContext);
        resolvedFunction =
                ContextResolvedFunction.permanent(
                        FunctionIdentifier.of(objectIdentifier), functionInstance);
    } else {
        resolvedFunction = ContextResolvedFunction.anonymous(functionInstance);
    }
    switch (functionInstance.getKind()) {
        case SCALAR:
        case TABLE:
            return BridgingSqlFunction.of(
                    serdeContext.getFlinkContext(), serdeContext.getTypeFactory(), resolvedFunction);
        case AGGREGATE:
            return BridgingSqlAggFunction.of(
                    serdeContext.getFlinkContext(), serdeContext.getTypeFactory(), resolvedFunction);
        default:
            throw new TableException(
                    String.format(
                            "Unsupported anonymous function kind '%s' for class '%s'.",
                            functionInstance.getKind(), className));
    }
}
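To make the role of the ObjectIdentifier above concrete, here is a minimal, self-contained sketch of how a catalog function's identifier relates to its serializable string form and to the FunctionIdentifier that ContextResolvedFunction.permanent(...) receives. The catalog, database, and function names (and the class name FunctionIdentifierSketch) are made up for illustration.

import org.apache.flink.table.catalog.ObjectIdentifier;
import org.apache.flink.table.functions.FunctionIdentifier;

public class FunctionIdentifierSketch {
    public static void main(String[] args) {
        // Fully qualified identifier of a catalog function; the names are placeholders.
        ObjectIdentifier id = ObjectIdentifier.of("my_catalog", "my_database", "my_udf");
        // The serialized plan stores the identifier as an escaped string,
        // e.g. `my_catalog`.`my_database`.`my_udf`.
        String serialized = id.asSerializableString();
        // On deserialization the identifier is wrapped into a FunctionIdentifier,
        // the argument that ContextResolvedFunction.permanent(...) takes above.
        FunctionIdentifier functionIdentifier = FunctionIdentifier.of(id);
        System.out.println(serialized + " -> " + functionIdentifier);
    }
}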
Use of org.apache.flink.table.catalog.ObjectIdentifier in project flink by splunk.
The class AbstractStreamTableEnvironmentImpl, method fromStreamInternal:
protected <T> Table fromStreamInternal(
        DataStream<T> dataStream,
        @Nullable Schema schema,
        @Nullable String viewPath,
        ChangelogMode changelogMode) {
    Preconditions.checkNotNull(dataStream, "Data stream must not be null.");
    Preconditions.checkNotNull(changelogMode, "Changelog mode must not be null.");
    if (dataStream.getExecutionEnvironment() != executionEnvironment) {
        throw new ValidationException(
                "The DataStream's StreamExecutionEnvironment must be identical to the one that "
                        + "has been passed to the StreamTableEnvironment during instantiation.");
    }
    final CatalogManager catalogManager = getCatalogManager();
    final OperationTreeBuilder operationTreeBuilder = getOperationTreeBuilder();
    final SchemaTranslator.ConsumingResult schemaTranslationResult =
            SchemaTranslator.createConsumingResult(
                    catalogManager.getDataTypeFactory(), dataStream.getType(), schema);
    final ResolvedCatalogTable resolvedCatalogTable =
            catalogManager.resolveCatalogTable(
                    new ExternalCatalogTable(schemaTranslationResult.getSchema()));
    final ContextResolvedTable contextResolvedTable;
    if (viewPath != null) {
        UnresolvedIdentifier unresolvedIdentifier = getParser().parseIdentifier(viewPath);
        final ObjectIdentifier objectIdentifier =
                catalogManager.qualifyIdentifier(unresolvedIdentifier);
        contextResolvedTable = ContextResolvedTable.temporary(objectIdentifier, resolvedCatalogTable);
    } else {
        contextResolvedTable = ContextResolvedTable.anonymous("datastream_source", resolvedCatalogTable);
    }
    final QueryOperation scanOperation =
            new ExternalQueryOperation<>(
                    contextResolvedTable,
                    dataStream,
                    schemaTranslationResult.getPhysicalDataType(),
                    schemaTranslationResult.isTopLevelRecord(),
                    changelogMode);
    final List<String> projections = schemaTranslationResult.getProjections();
    if (projections == null) {
        return createTable(scanOperation);
    }
    final QueryOperation projectOperation =
            operationTreeBuilder.project(
                    projections.stream()
                            .map(ApiExpressionUtils::unresolvedRef)
                            .collect(Collectors.toList()),
                    scanOperation);
    return createTable(projectOperation);
}
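The viewPath handling above hinges on CatalogManager.qualifyIdentifier filling in the session's current catalog and database. The following standalone sketch imitates that qualification step without a CatalogManager; the current catalog and database names, and the class name QualifyIdentifierSketch, are assumptions chosen for illustration.

import org.apache.flink.table.catalog.ObjectIdentifier;
import org.apache.flink.table.catalog.UnresolvedIdentifier;

public class QualifyIdentifierSketch {
    public static void main(String[] args) {
        // A partial identifier, as parseIdentifier("my_view") would produce for a bare name.
        UnresolvedIdentifier unresolved = UnresolvedIdentifier.of("my_view");
        // Stand-ins for whatever catalog/database the session currently points to.
        String currentCatalog = "default_catalog";
        String currentDatabase = "default_database";
        // Qualification fills in the missing parts, yielding a fully qualified ObjectIdentifier.
        ObjectIdentifier qualified =
                ObjectIdentifier.of(
                        unresolved.getCatalogName().orElse(currentCatalog),
                        unresolved.getDatabaseName().orElse(currentDatabase),
                        unresolved.getObjectName());
        System.out.println(qualified.asSummaryString()); // default_catalog.default_database.my_view
    }
}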
Use of org.apache.flink.table.catalog.ObjectIdentifier in project flink by splunk.
The class TableEnvironmentImpl, method registerTableSourceInternal:
@Override
public void registerTableSourceInternal(String name, TableSource<?> tableSource) {
    validateTableSource(tableSource);
    ObjectIdentifier objectIdentifier =
            catalogManager.qualifyIdentifier(UnresolvedIdentifier.of(name));
    Optional<CatalogBaseTable> table = getTemporaryTable(objectIdentifier);
    if (table.isPresent()) {
        if (table.get() instanceof ConnectorCatalogTable<?, ?>) {
            ConnectorCatalogTable<?, ?> sourceSinkTable = (ConnectorCatalogTable<?, ?>) table.get();
            if (sourceSinkTable.getTableSource().isPresent()) {
                throw new ValidationException(
                        String.format(
                                "Table '%s' already exists. Please choose a different name.", name));
            } else {
                // wrapper contains only sink (not source)
                ConnectorCatalogTable sourceAndSink =
                        ConnectorCatalogTable.sourceAndSink(
                                tableSource, sourceSinkTable.getTableSink().get(), !IS_STREAM_TABLE);
                catalogManager.dropTemporaryTable(objectIdentifier, false);
                catalogManager.createTemporaryTable(sourceAndSink, objectIdentifier, false);
            }
        } else {
            throw new ValidationException(
                    String.format("Table '%s' already exists. Please choose a different name.", name));
        }
    } else {
        ConnectorCatalogTable source = ConnectorCatalogTable.source(tableSource, !IS_STREAM_TABLE);
        catalogManager.createTemporaryTable(source, objectIdentifier, false);
    }
}
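For reference, here is a short sketch of the pieces that an ObjectIdentifier like the one produced by qualifyIdentifier(UnresolvedIdentifier.of(name)) carries, and of the string forms it offers. The concrete names and the class name IdentifierComponentsSketch are illustrative only.

import org.apache.flink.table.catalog.ObjectIdentifier;

public class IdentifierComponentsSketch {
    public static void main(String[] args) {
        ObjectIdentifier id = ObjectIdentifier.of("default_catalog", "default_database", "my_table");
        // The three components under which a temporary table is registered.
        System.out.println(id.getCatalogName());  // default_catalog
        System.out.println(id.getDatabaseName()); // default_database
        System.out.println(id.getObjectName());   // my_table
        // asSummaryString() is the plain dotted form used in messages;
        // asSerializableString() escapes each part with backticks so unusual names round-trip.
        System.out.println(id.asSummaryString());      // default_catalog.default_database.my_table
        System.out.println(id.asSerializableString()); // `default_catalog`.`default_database`.`my_table`
    }
}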
Use of org.apache.flink.table.catalog.ObjectIdentifier in project flink by splunk.
The class TableEnvironmentImpl, method createTemporaryView:
private void createTemporaryView(UnresolvedIdentifier identifier, Table view) {
    if (((TableImpl) view).getTableEnvironment() != this) {
        throw new TableException(
                "Only table API objects that belong to this TableEnvironment can be registered.");
    }
    ObjectIdentifier tableIdentifier = catalogManager.qualifyIdentifier(identifier);
    QueryOperation queryOperation =
            qualifyQueryOperation(tableIdentifier, view.getQueryOperation());
    CatalogBaseTable tableTable = new QueryOperationCatalogView(queryOperation);
    catalogManager.createTemporaryTable(tableTable, tableIdentifier, false);
}
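createTemporaryTable(...) keys the registered view on the qualified ObjectIdentifier, which works because ObjectIdentifier has value-based equals and hashCode. A minimal sketch of that property, using made-up catalog, database, and view names and a hypothetical class name:

import java.util.HashMap;
import java.util.Map;

import org.apache.flink.table.catalog.ObjectIdentifier;

public class IdentifierAsMapKeySketch {
    public static void main(String[] args) {
        Map<ObjectIdentifier, String> temporaryTables = new HashMap<>();
        // Two identifiers built independently from the same parts are equal,
        // so either one can be used to look up the registered entry.
        ObjectIdentifier first = ObjectIdentifier.of("default_catalog", "default_database", "my_view");
        ObjectIdentifier second = ObjectIdentifier.of("default_catalog", "default_database", "my_view");
        temporaryTables.put(first, "a QueryOperationCatalogView would go here");
        System.out.println(temporaryTables.get(second)); // found, because first.equals(second)
    }
}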
Use of org.apache.flink.table.catalog.ObjectIdentifier in project flink by splunk.
The class TableEnvironmentTest, method innerTestManagedTableFromDescriptor:
private void innerTestManagedTableFromDescriptor(boolean ignoreIfExists, boolean isTemporary) {
    final TableEnvironmentMock tEnv = TableEnvironmentMock.getStreamingInstance();
    final String catalog = tEnv.getCurrentCatalog();
    final String database = tEnv.getCurrentDatabase();

    final Schema schema = Schema.newBuilder().column("f0", DataTypes.INT()).build();
    final String tableName = UUID.randomUUID().toString();
    ObjectIdentifier identifier = ObjectIdentifier.of(catalog, database, tableName);

    // create table
    MANAGED_TABLES.put(identifier, new AtomicReference<>());
    CreateTableOperation createOperation =
            new CreateTableOperation(
                    identifier,
                    TableDescriptor.forManaged()
                            .schema(schema)
                            .option("a", "Test")
                            .build()
                            .toCatalogTable(),
                    ignoreIfExists,
                    isTemporary);
    tEnv.executeInternal(createOperation);

    // test ignore: create again
    if (ignoreIfExists) {
        tEnv.executeInternal(createOperation);
    } else {
        assertThatThrownBy(
                () -> tEnv.executeInternal(createOperation),
                isTemporary ? "already exists" : "Could not execute CreateTable");
    }

    // lookup table
    boolean isInCatalog =
            tEnv.getCatalog(catalog)
                    .orElseThrow(AssertionError::new)
                    .tableExists(new ObjectPath(database, tableName));
    if (isTemporary) {
        assertThat(isInCatalog).isFalse();
    } else {
        assertThat(isInCatalog).isTrue();
    }

    final Optional<ContextResolvedTable> lookupResult = tEnv.getCatalogManager().getTable(identifier);
    assertThat(lookupResult.isPresent()).isTrue();

    final CatalogBaseTable catalogTable = lookupResult.get().getTable();
    assertThat(catalogTable instanceof CatalogTable).isTrue();
    assertThat(catalogTable.getUnresolvedSchema()).isEqualTo(schema);
    assertThat(catalogTable.getOptions().get("a")).isEqualTo("Test");
    assertThat(catalogTable.getOptions().get(ENRICHED_KEY)).isEqualTo(ENRICHED_VALUE);

    AtomicReference<Map<String, String>> reference = MANAGED_TABLES.get(identifier);
    assertThat(reference.get()).isNotNull();
    assertThat(reference.get().get("a")).isEqualTo("Test");
    assertThat(reference.get().get(ENRICHED_KEY)).isEqualTo(ENRICHED_VALUE);

    DropTableOperation dropOperation = new DropTableOperation(identifier, ignoreIfExists, isTemporary);
    tEnv.executeInternal(dropOperation);
    assertThat(MANAGED_TABLES.get(identifier).get()).isNull();

    // test ignore: drop again
    if (ignoreIfExists) {
        tEnv.executeInternal(dropOperation);
    } else {
        assertThatThrownBy(() -> tEnv.executeInternal(dropOperation), "does not exist");
    }
    MANAGED_TABLES.remove(identifier);
}
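The test above builds the full identifier with ObjectIdentifier.of(catalog, database, tableName) but checks the catalog with new ObjectPath(database, tableName). A small sketch of how the two relate; the names and the class name ObjectPathSketch are placeholders:

import org.apache.flink.table.catalog.ObjectIdentifier;
import org.apache.flink.table.catalog.ObjectPath;

public class ObjectPathSketch {
    public static void main(String[] args) {
        ObjectIdentifier id = ObjectIdentifier.of("default_catalog", "default_database", "my_table");
        // toObjectPath() drops the catalog name and keeps database + object name,
        // which is what per-catalog APIs such as Catalog#tableExists expect.
        ObjectPath path = id.toObjectPath();
        System.out.println(path.getFullName()); // default_database.my_table
    }
}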