Use of org.apache.flink.table.catalog.ConnectorCatalogTable in project flink by apache.
The class TpcdsTestProgram, method prepareTableEnv.
/**
 * Prepares a TableEnvironment for running the TPC-DS queries.
 *
 * @param sourceTablePath path to the directory containing the TPC-DS data files
 * @param useTableStats whether to register table and column statistics for the optimizer
 * @return the configured TableEnvironment
 */
private static TableEnvironment prepareTableEnv(String sourceTablePath, Boolean useTableStats) {
    // init Table Env
    EnvironmentSettings environmentSettings = EnvironmentSettings.inBatchMode();
    TableEnvironment tEnv = TableEnvironment.create(environmentSettings);

    // config Optimizer parameters
    // TODO use the default shuffle mode of batch runtime mode once FLINK-23470 is implemented
    tEnv.getConfig()
            .getConfiguration()
            .setString(
                    ExecutionConfigOptions.TABLE_EXEC_SHUFFLE_MODE,
                    GlobalStreamExchangeMode.POINTWISE_EDGES_PIPELINED.toString());
    tEnv.getConfig()
            .getConfiguration()
            .setLong(OptimizerConfigOptions.TABLE_OPTIMIZER_BROADCAST_JOIN_THRESHOLD, 10 * 1024 * 1024);
    tEnv.getConfig()
            .getConfiguration()
            .setBoolean(OptimizerConfigOptions.TABLE_OPTIMIZER_JOIN_REORDER_ENABLED, true);

    // register TPC-DS tables
    TPCDS_TABLES.forEach(
            table -> {
                TpcdsSchema schema = TpcdsSchemaProvider.getTableSchema(table);
                CsvTableSource.Builder builder = CsvTableSource.builder();
                builder.path(sourceTablePath + FILE_SEPARATOR + table + DATA_SUFFIX);
                for (int i = 0; i < schema.getFieldNames().size(); i++) {
                    builder.field(
                            schema.getFieldNames().get(i),
                            TypeConversions.fromDataTypeToLegacyInfo(schema.getFieldTypes().get(i)));
                }
                builder.fieldDelimiter(COL_DELIMITER);
                builder.emptyColumnAsNull();
                builder.lineDelimiter("\n");
                CsvTableSource tableSource = builder.build();
                ConnectorCatalogTable catalogTable = ConnectorCatalogTable.source(tableSource, true);
                tEnv.getCatalog(tEnv.getCurrentCatalog())
                        .ifPresent(
                                catalog -> {
                                    try {
                                        catalog.createTable(
                                                new ObjectPath(tEnv.getCurrentDatabase(), table),
                                                catalogTable,
                                                false);
                                    } catch (Exception e) {
                                        throw new RuntimeException(e);
                                    }
                                });
            });

    // register statistics info
    if (useTableStats) {
        TpcdsStatsProvider.registerTpcdsStats(tEnv);
    }
    return tEnv;
}
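For orientation, the returned TableEnvironment is what the TPC-DS test then runs its queries against. The following is a minimal, hypothetical sketch of such a caller; the data path and the query are placeholders, not the actual TpcdsTestProgram driver code.

// Hypothetical caller (not part of TpcdsTestProgram): the path and query are placeholders.
private static void runExampleQuery() throws Exception {
    TableEnvironment tEnv = prepareTableEnv("/data/tpcds", true);
    Table result = tEnv.sqlQuery("SELECT COUNT(*) FROM store_sales");
    // collect() returns a CloseableIterator<Row> over the query result
    try (CloseableIterator<Row> it = result.execute().collect()) {
        while (it.hasNext()) {
            System.out.println(it.next());
        }
    }
}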
Use of org.apache.flink.table.catalog.ConnectorCatalogTable in project flink by apache.
The class FlinkCalciteCatalogReader, method toPreparingTable.
/**
 * Translates this {@link CatalogSchemaTable} into a Flink source table.
 */
private static FlinkPreparingTableBase toPreparingTable(
        RelOptSchema relOptSchema,
        List<String> names,
        RelDataType rowType,
        CatalogSchemaTable schemaTable) {
    final ResolvedCatalogBaseTable<?> resolvedBaseTable =
            schemaTable.getContextResolvedTable().getResolvedTable();
    final CatalogBaseTable originTable = resolvedBaseTable.getOrigin();
    if (originTable instanceof QueryOperationCatalogView) {
        return convertQueryOperationView(
                relOptSchema, names, rowType, (QueryOperationCatalogView) originTable);
    } else if (originTable instanceof ConnectorCatalogTable) {
        ConnectorCatalogTable<?, ?> connectorTable = (ConnectorCatalogTable<?, ?>) originTable;
        if (connectorTable.getTableSource().isPresent()) {
            return convertLegacyTableSource(
                    relOptSchema,
                    rowType,
                    schemaTable.getContextResolvedTable().getIdentifier(),
                    connectorTable,
                    schemaTable.getStatistic(),
                    schemaTable.isStreamingMode());
        } else {
            throw new ValidationException("Cannot convert a connector table without source.");
        }
    } else if (originTable instanceof CatalogView) {
        return convertCatalogView(
                relOptSchema, names, rowType, schemaTable.getStatistic(), (CatalogView) originTable);
    } else if (originTable instanceof CatalogTable) {
        return convertCatalogTable(relOptSchema, names, rowType, schemaTable);
    } else {
        throw new ValidationException("Unsupported table type: " + originTable);
    }
}
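A key detail in the ConnectorCatalogTable branch above is that only wrappers carrying a TableSource can be converted. The following is a small illustrative sketch (the file path and schema are assumptions, not taken from the Flink tests) showing which factory method leaves which side of the wrapper populated.

// Illustrative sketch: the path and schema below are assumptions for this example.
CsvTableSource csvSource =
        CsvTableSource.builder()
                .path("/tmp/items.csv")
                .field("id", Types.LONG)
                .field("name", Types.STRING)
                .build();

// ConnectorCatalogTable.source(...) populates only the TableSource side of the wrapper ...
ConnectorCatalogTable<?, ?> sourceOnly = ConnectorCatalogTable.source(csvSource, true);
// ... so getTableSource() is present and toPreparingTable can call convertLegacyTableSource.
// A wrapper created with ConnectorCatalogTable.sink(...) has no TableSource and would hit
// the "Cannot convert a connector table without source." branch instead.
boolean convertible = sourceOnly.getTableSource().isPresent(); // true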
Use of org.apache.flink.table.catalog.ConnectorCatalogTable in project flink by apache.
The class TableEnvironmentImpl, method registerTableSourceInternal.
@Override
public void registerTableSourceInternal(String name, TableSource<?> tableSource) {
    validateTableSource(tableSource);
    ObjectIdentifier objectIdentifier =
            catalogManager.qualifyIdentifier(UnresolvedIdentifier.of(name));
    Optional<CatalogBaseTable> table = getTemporaryTable(objectIdentifier);
    if (table.isPresent()) {
        if (table.get() instanceof ConnectorCatalogTable<?, ?>) {
            ConnectorCatalogTable<?, ?> sourceSinkTable = (ConnectorCatalogTable<?, ?>) table.get();
            if (sourceSinkTable.getTableSource().isPresent()) {
                throw new ValidationException(
                        String.format(
                                "Table '%s' already exists. Please choose a different name.", name));
            } else {
                // wrapper contains only sink (not source)
                ConnectorCatalogTable sourceAndSink =
                        ConnectorCatalogTable.sourceAndSink(
                                tableSource, sourceSinkTable.getTableSink().get(), !IS_STREAM_TABLE);
                catalogManager.dropTemporaryTable(objectIdentifier, false);
                catalogManager.createTemporaryTable(sourceAndSink, objectIdentifier, false);
            }
        } else {
            throw new ValidationException(
                    String.format("Table '%s' already exists. Please choose a different name.", name));
        }
    } else {
        ConnectorCatalogTable source = ConnectorCatalogTable.source(tableSource, !IS_STREAM_TABLE);
        catalogManager.createTemporaryTable(source, objectIdentifier, false);
    }
}
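For context, this internal method backs the legacy source-registration path. A hedged sketch of how it could be exercised; the cast, the table name, and the CSV source definition are illustrative assumptions rather than Flink test code.

// Illustrative sketch: the cast and the source definition are assumptions for this example.
TableEnvironmentInternal internalEnv = (TableEnvironmentInternal) tEnv;
CsvTableSource itemsSource =
        CsvTableSource.builder()
                .path("/tmp/items.csv")
                .field("id", Types.LONG)
                .field("name", Types.STRING)
                .build();

// First registration: no temporary table exists yet, so a source-only wrapper is created.
internalEnv.registerTableSourceInternal("items", itemsSource);

// Registering another source under the same name would find a wrapper whose TableSource
// is already present and fail with "Table 'items' already exists. ...".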
Use of org.apache.flink.table.catalog.ConnectorCatalogTable in project flink by apache.
The class TableEnvironmentImpl, method registerTableSinkInternal.
@Override
public void registerTableSinkInternal(String name, TableSink<?> tableSink) {
    ObjectIdentifier objectIdentifier =
            catalogManager.qualifyIdentifier(UnresolvedIdentifier.of(name));
    Optional<CatalogBaseTable> table = getTemporaryTable(objectIdentifier);
    if (table.isPresent()) {
        if (table.get() instanceof ConnectorCatalogTable<?, ?>) {
            ConnectorCatalogTable<?, ?> sourceSinkTable = (ConnectorCatalogTable<?, ?>) table.get();
            if (sourceSinkTable.getTableSink().isPresent()) {
                throw new ValidationException(
                        String.format(
                                "Table '%s' already exists. Please choose a different name.", name));
            } else {
                // wrapper contains only source (not sink); merge the new sink into it
                ConnectorCatalogTable sourceAndSink =
                        ConnectorCatalogTable.sourceAndSink(
                                sourceSinkTable.getTableSource().get(), tableSink, !IS_STREAM_TABLE);
                catalogManager.dropTemporaryTable(objectIdentifier, false);
                catalogManager.createTemporaryTable(sourceAndSink, objectIdentifier, false);
            }
        } else {
            throw new ValidationException(
                    String.format("Table '%s' already exists. Please choose a different name.", name));
        }
    } else {
        ConnectorCatalogTable sink = ConnectorCatalogTable.sink(tableSink, !IS_STREAM_TABLE);
        catalogManager.createTemporaryTable(sink, objectIdentifier, false);
    }
}
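Putting the two methods together: registering a source and then a sink under the same temporary name merges them into a single sourceAndSink wrapper. A minimal illustrative continuation of the sketch above; the CsvTableSink definition is an assumption, and a real pipeline would still configure the sink's schema before writing.

// Illustrative continuation: a legacy sink registered under the same name as the source above.
CsvTableSink itemsSink = new CsvTableSink("/tmp/items_out.csv", "|");

// The existing wrapper contains only a source, so registerTableSinkInternal drops it and
// re-creates the temporary table as a sourceAndSink wrapper holding both sides.
internalEnv.registerTableSinkInternal("items", itemsSink);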