Use of org.apache.flink.table.operations.SourceQueryOperation in project flink by apache.
From the class TableEnvironmentTest, method testTableFromDescriptor:
@Test
public void testTableFromDescriptor() {
    final TableEnvironmentMock tEnv = TableEnvironmentMock.getStreamingInstance();

    final Schema schema = Schema.newBuilder().column("f0", DataTypes.INT()).build();
    final TableDescriptor descriptor =
            TableDescriptor.forConnector("fake").schema(schema).build();

    final Table table = tEnv.from(descriptor);

    assertThat(Schema.newBuilder().fromResolvedSchema(table.getResolvedSchema()).build())
            .isEqualTo(schema);

    assertThat(table.getQueryOperation())
            .asInstanceOf(type(SourceQueryOperation.class))
            .extracting(SourceQueryOperation::getContextResolvedTable)
            .satisfies(
                    crs -> {
                        assertThat(crs.isAnonymous()).isTrue();
                        assertThat(crs.getIdentifier().toList()).hasSize(1);
                        assertThat(crs.getTable().getOptions())
                                .containsEntry("connector", "fake");
                    });

    assertThat(tEnv.getCatalogManager().listTables()).isEmpty();
}
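For context, a minimal sketch of how from(TableDescriptor) might be called from user code, assuming the built-in datagen connector and a freshly created streaming TableEnvironment (the connector choice and option values are illustrative, not taken from the quoted test):

import org.apache.flink.table.api.DataTypes;
import org.apache.flink.table.api.EnvironmentSettings;
import org.apache.flink.table.api.Schema;
import org.apache.flink.table.api.Table;
import org.apache.flink.table.api.TableDescriptor;
import org.apache.flink.table.api.TableEnvironment;

public final class FromDescriptorExample {
    public static void main(String[] args) {
        TableEnvironment tEnv =
                TableEnvironment.create(EnvironmentSettings.inStreamingMode());

        // An inline (unregistered) table backed by the datagen connector.
        TableDescriptor descriptor =
                TableDescriptor.forConnector("datagen")
                        .schema(Schema.newBuilder().column("f0", DataTypes.INT()).build())
                        .option("number-of-rows", "10")
                        .build();

        // As the test above asserts, this does not register anything in the catalog.
        Table table = tEnv.from(descriptor);
        table.execute().print();
    }
}

Because the descriptor is passed inline rather than registered first, the resulting table stays anonymous, which is exactly what the assertions above verify.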
Use of org.apache.flink.table.operations.SourceQueryOperation in project flink by apache.
From the class TableEnvironmentImpl, method from:
@Override
public Table from(TableDescriptor descriptor) {
    Preconditions.checkNotNull(descriptor, "Table descriptor must not be null.");

    final ResolvedCatalogTable resolvedCatalogBaseTable =
            catalogManager.resolveCatalogTable(descriptor.toCatalogTable());
    final QueryOperation queryOperation =
            new SourceQueryOperation(ContextResolvedTable.anonymous(resolvedCatalogBaseTable));

    return createTable(queryOperation);
}
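The ContextResolvedTable.anonymous(...) call is what separates this code path from reading a registered table. A short sketch of the contrast, reusing the tEnv and descriptor names from the example above (illustrative, not from the quoted source):

// Anonymous path: the descriptor is resolved on the fly; nothing is stored in the catalog.
Table anonymous = tEnv.from(descriptor);

// Registered path: the descriptor is stored under a name first, then looked up by identifier.
tEnv.createTemporaryTable("MyTable", descriptor);
Table registered = tEnv.from("MyTable");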
Use of org.apache.flink.table.operations.SourceQueryOperation in project flink by apache.
From the class SqlToOperationConverterTest, method checkAlterTableCompact:
private void checkAlterTableCompact(Operation operation, Map<String, String> staticPartitions) {
    assertThat(operation).isInstanceOf(SinkModifyOperation.class);
    SinkModifyOperation modifyOperation = (SinkModifyOperation) operation;
    assertThat(modifyOperation.getStaticPartitions())
            .containsExactlyInAnyOrderEntriesOf(staticPartitions);
    assertThat(modifyOperation.isOverwrite()).isFalse();
    assertThat(modifyOperation.getDynamicOptions())
            .containsEntry(
                    TestManagedTableFactory.ENRICHED_KEY, TestManagedTableFactory.ENRICHED_VALUE);

    ContextResolvedTable contextResolvedTable = modifyOperation.getContextResolvedTable();
    assertThat(contextResolvedTable.getIdentifier())
            .isEqualTo(ObjectIdentifier.of("cat1", "db1", "tb1"));

    assertThat(modifyOperation.getChild()).isInstanceOf(SourceQueryOperation.class);
    SourceQueryOperation child = (SourceQueryOperation) modifyOperation.getChild();
    assertThat(child.getChildren()).isEmpty();
    assertThat(child.getDynamicOptions()).containsEntry("k", "v");
    assertThat(child.getDynamicOptions())
            .containsEntry(
                    TestManagedTableFactory.ENRICHED_KEY, TestManagedTableFactory.ENRICHED_VALUE);
}
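For reference, a hedged sketch of the kind of statements that produce the SinkModifyOperation this helper inspects, assuming a managed, partitioned table tb1 and a partition column dt (both names are illustrative):

// Compact a single partition; the planner converts this into a SinkModifyOperation
// whose child is a SourceQueryOperation, as asserted above.
tableEnv.executeSql("ALTER TABLE tb1 PARTITION (dt = '2022-01-01') COMPACT");

// Compact the whole table (empty static partition spec).
tableEnv.executeSql("ALTER TABLE tb1 COMPACT");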
Use of org.apache.flink.table.operations.SourceQueryOperation in project flink by apache.
From the class SqlToOperationConverter, method convertAlterTableCompact:
/**
 * Converts an {@code ALTER TABLE ... COMPACT} operation into a {@link ModifyOperation} that
 * triggers a compaction batch job on a Flink managed table.
 */
private ModifyOperation convertAlterTableCompact(
        ObjectIdentifier tableIdentifier,
        ContextResolvedTable contextResolvedTable,
        SqlAlterTableCompact alterTableCompact) {
    Catalog catalog = catalogManager.getCatalog(tableIdentifier.getCatalogName()).orElse(null);
    ResolvedCatalogTable resolvedCatalogTable = contextResolvedTable.getResolvedTable();
    if (ManagedTableListener.isManagedTable(catalog, resolvedCatalogTable)) {
        Map<String, String> partitionKVs = alterTableCompact.getPartitionKVs();
        CatalogPartitionSpec partitionSpec = new CatalogPartitionSpec(Collections.emptyMap());
        if (partitionKVs != null) {
            List<String> partitionKeys = resolvedCatalogTable.getPartitionKeys();
            Set<String> validPartitionKeySet = new HashSet<>(partitionKeys);
            String exMsg =
                    partitionKeys.isEmpty()
                            ? String.format("Table %s is not partitioned.", tableIdentifier)
                            : String.format(
                                    "Available ordered partition columns: [%s]",
                                    partitionKeys.stream()
                                            .collect(Collectors.joining("', '", "'", "'")));
            partitionKVs.forEach(
                    (partitionKey, partitionValue) -> {
                        if (!validPartitionKeySet.contains(partitionKey)) {
                            throw new ValidationException(
                                    String.format(
                                            "Partition column '%s' not defined in the table schema. %s",
                                            partitionKey, exMsg));
                        }
                    });
            partitionSpec = new CatalogPartitionSpec(partitionKVs);
        }
        Map<String, String> compactOptions =
                catalogManager.resolveCompactManagedTableOptions(
                        resolvedCatalogTable, tableIdentifier, partitionSpec);
        QueryOperation child = new SourceQueryOperation(contextResolvedTable, compactOptions);
        return new SinkModifyOperation(
                contextResolvedTable,
                child,
                partitionSpec.getPartitionSpec(),
                false, // not an overwrite
                compactOptions);
    }
    throw new ValidationException(
            String.format(
                    "ALTER TABLE COMPACT operation is not supported for non-managed table %s",
                    tableIdentifier));
}
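The partition-key check in the middle of this method can be read in isolation. A minimal standalone sketch of the same validation, using plain JDK types and an illustrative method name (not Flink API):

import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Set;

static void validatePartitionKeys(
        List<String> partitionKeys, Map<String, String> partitionKVs, String tableName) {
    Set<String> validKeys = new HashSet<>(partitionKeys);
    String hint =
            partitionKeys.isEmpty()
                    ? String.format("Table %s is not partitioned.", tableName)
                    : String.format(
                            "Available ordered partition columns: ['%s']",
                            String.join("', '", partitionKeys));
    // Reject any key from the PARTITION (...) clause that is not a declared partition column.
    for (String key : partitionKVs.keySet()) {
        if (!validKeys.contains(key)) {
            throw new IllegalArgumentException(
                    String.format(
                            "Partition column '%s' not defined in the table schema. %s",
                            key, hint));
        }
    }
}

A well-formed spec such as {dt=2022-01-01} against partition keys [dt] passes; any undeclared key fails with the same message format the Flink code produces.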