Use of org.apache.flink.table.catalog.CatalogBaseTable in project flink by splunk.
The class FlinkCalciteCatalogReader, method toPreparingTable:
/**
 * Translate this {@link CatalogSchemaTable} into a Flink source table.
 */
private static FlinkPreparingTableBase toPreparingTable(
        RelOptSchema relOptSchema,
        List<String> names,
        RelDataType rowType,
        CatalogSchemaTable schemaTable) {
    final ResolvedCatalogBaseTable<?> resolvedBaseTable =
            schemaTable.getContextResolvedTable().getResolvedTable();
    final CatalogBaseTable originTable = resolvedBaseTable.getOrigin();
    if (originTable instanceof QueryOperationCatalogView) {
        return convertQueryOperationView(
                relOptSchema, names, rowType, (QueryOperationCatalogView) originTable);
    } else if (originTable instanceof ConnectorCatalogTable) {
        ConnectorCatalogTable<?, ?> connectorTable = (ConnectorCatalogTable<?, ?>) originTable;
        if (connectorTable.getTableSource().isPresent()) {
            return convertLegacyTableSource(
                    relOptSchema,
                    rowType,
                    schemaTable.getContextResolvedTable().getIdentifier(),
                    connectorTable,
                    schemaTable.getStatistic(),
                    schemaTable.isStreamingMode());
        } else {
            throw new ValidationException("Cannot convert a connector table without source.");
        }
    } else if (originTable instanceof CatalogView) {
        return convertCatalogView(
                relOptSchema, names, rowType, schemaTable.getStatistic(), (CatalogView) originTable);
    } else if (originTable instanceof CatalogTable) {
        return convertCatalogTable(relOptSchema, names, rowType, schemaTable);
    } else {
        throw new ValidationException("Unsupported table type: " + originTable);
    }
}
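The method above is a type dispatch over the origin CatalogBaseTable, checking the more specific subtypes before CatalogTable. A minimal sketch of the same dispatch pattern outside the planner; describe and TableKindDispatcher are hypothetical names of my own, not Flink API:

import org.apache.flink.table.catalog.CatalogBaseTable;
import org.apache.flink.table.catalog.CatalogTable;
import org.apache.flink.table.catalog.CatalogView;
import org.apache.flink.table.catalog.ConnectorCatalogTable;

public final class TableKindDispatcher {

    // Classifies a CatalogBaseTable the way toPreparingTable does,
    // from most to least specific subtype.
    static String describe(CatalogBaseTable table) {
        if (table instanceof ConnectorCatalogTable) {
            // Legacy TableSource/TableSink wrapper; it also implements
            // CatalogTable, so it must be tested first.
            return "legacy connector table";
        } else if (table instanceof CatalogView) {
            return "view over: " + ((CatalogView) table).getOriginalQuery();
        } else if (table instanceof CatalogTable) {
            return "table, partitioned by " + ((CatalogTable) table).getPartitionKeys();
        }
        return "unsupported: " + table.getClass().getName();
    }
}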
Use of org.apache.flink.table.catalog.CatalogBaseTable in project hudi by apache.
The class TestHoodieCatalog, method testGetTable:
@Test
public void testGetTable() throws Exception {
    ObjectPath tablePath = new ObjectPath(TEST_DEFAULT_DATABASE, "tb1");
    // create table
    catalog.createTable(tablePath, EXPECTED_CATALOG_TABLE, true);
    Map<String, String> expectedOptions = new HashMap<>(EXPECTED_OPTIONS);
    expectedOptions.put(FlinkOptions.TABLE_TYPE.key(), FlinkOptions.TABLE_TYPE_MERGE_ON_READ);
    expectedOptions.put(FlinkOptions.INDEX_GLOBAL_ENABLED.key(), "false");
    expectedOptions.put(FlinkOptions.PRE_COMBINE.key(), "true");
    expectedOptions.put("connector", "hudi");
    expectedOptions.put(
            FlinkOptions.PATH.key(),
            String.format(
                    "%s/%s/%s",
                    tempFile.getAbsolutePath(),
                    tablePath.getDatabaseName(),
                    tablePath.getObjectName()));
    // test get table
    CatalogBaseTable actualTable = catalog.getTable(tablePath);
    // validate schema
    Schema actualSchema = actualTable.getUnresolvedSchema();
    Schema expectedSchema = Schema.newBuilder().fromResolvedSchema(EXPECTED_TABLE_SCHEMA).build();
    assertEquals(expectedSchema, actualSchema);
    // validate options
    Map<String, String> actualOptions = actualTable.getOptions();
    assertEquals(expectedOptions, actualOptions);
    // validate comment
    assertEquals(EXPECTED_CATALOG_TABLE.getComment(), actualTable.getComment());
    // validate partition keys
    assertEquals(
            EXPECTED_CATALOG_TABLE.getPartitionKeys(),
            ((CatalogTable) actualTable).getPartitionKeys());
}
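EXPECTED_CATALOG_TABLE is defined elsewhere in the test class. A table like it can be built with the CatalogTable.of factory; the sketch below is illustrative only, with a made-up schema, path, and partition column:

import java.util.Arrays;
import java.util.HashMap;
import java.util.Map;

import org.apache.flink.table.api.DataTypes;
import org.apache.flink.table.api.Schema;
import org.apache.flink.table.catalog.CatalogTable;

public final class HudiTableSketch {

    // Builds a Hudi-flavored CatalogTable; the option keys mirror the test
    // above, while the columns and path are invented for illustration.
    static CatalogTable exampleHudiTable() {
        Map<String, String> options = new HashMap<>();
        options.put("connector", "hudi");
        options.put("path", "/tmp/warehouse/default/tb1");

        Schema schema = Schema.newBuilder()
                .column("uuid", DataTypes.STRING().notNull())
                .column("name", DataTypes.STRING())
                .column("part", DataTypes.STRING())
                .primaryKey("uuid")
                .build();

        return CatalogTable.of(
                schema,                 // unresolved schema
                "a hudi table",         // comment
                Arrays.asList("part"),  // partition keys
                options);               // connector options
    }
}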
Use of org.apache.flink.table.catalog.CatalogBaseTable in project flink by apache.
The class HiveCatalog, method createTable:
@Override
public void createTable(ObjectPath tablePath, CatalogBaseTable table, boolean ignoreIfExists)
        throws TableAlreadyExistException, DatabaseNotExistException, CatalogException {
    checkNotNull(tablePath, "tablePath cannot be null");
    checkNotNull(table, "table cannot be null");
    if (!databaseExists(tablePath.getDatabaseName())) {
        throw new DatabaseNotExistException(getName(), tablePath.getDatabaseName());
    }
    boolean managedTable = ManagedTableListener.isManagedTable(this, table);
    Table hiveTable = HiveTableUtil.instantiateHiveTable(tablePath, table, hiveConf, managedTable);
    UniqueConstraint pkConstraint = null;
    List<String> notNullCols = new ArrayList<>();
    boolean isHiveTable = isHiveTable(table.getOptions());
    if (isHiveTable) {
        pkConstraint = table.getSchema().getPrimaryKey().orElse(null);
        String nnColStr = hiveTable.getParameters().remove(NOT_NULL_COLS);
        if (nnColStr != null) {
            notNullCols.addAll(Arrays.asList(nnColStr.split(HiveDDLUtils.COL_DELIMITER)));
        } else {
            for (int i = 0; i < table.getSchema().getFieldDataTypes().length; i++) {
                if (!table.getSchema().getFieldDataTypes()[i].getLogicalType().isNullable()) {
                    notNullCols.add(table.getSchema().getFieldNames()[i]);
                }
            }
        }
        // remove the 'connector' option for hive table
        hiveTable.getParameters().remove(CONNECTOR.key());
    }
    try {
        if (pkConstraint != null || !notNullCols.isEmpty()) {
            // extract constraint traits from table properties
            String pkTraitStr = hiveTable.getParameters().remove(PK_CONSTRAINT_TRAIT);
            byte pkTrait = pkTraitStr == null ? HiveDDLUtils.defaultTrait() : Byte.parseByte(pkTraitStr);
            List<Byte> pkTraits =
                    Collections.nCopies(pkConstraint == null ? 0 : pkConstraint.getColumns().size(), pkTrait);
            List<Byte> nnTraits;
            String nnTraitsStr = hiveTable.getParameters().remove(NOT_NULL_CONSTRAINT_TRAITS);
            if (nnTraitsStr != null) {
                String[] traits = nnTraitsStr.split(HiveDDLUtils.COL_DELIMITER);
                Preconditions.checkArgument(
                        traits.length == notNullCols.size(),
                        "Number of NOT NULL columns and constraint traits mismatch");
                nnTraits = Arrays.stream(traits).map(Byte::new).collect(Collectors.toList());
            } else {
                nnTraits = Collections.nCopies(notNullCols.size(), HiveDDLUtils.defaultTrait());
            }
            client.createTableWithConstraints(hiveTable, hiveConf, pkConstraint, pkTraits, notNullCols, nnTraits);
        } else {
            client.createTable(hiveTable);
        }
    } catch (AlreadyExistsException e) {
        if (!ignoreIfExists) {
            throw new TableAlreadyExistException(getName(), tablePath, e);
        }
    } catch (TException e) {
        throw new CatalogException(String.format("Failed to create table %s", tablePath.getFullName()), e);
    }
}
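From the caller's side all of this is a single catalog call. A minimal usage sketch under assumed values; the catalog name, hive-conf directory, and table layout are placeholders:

import java.util.Collections;
import java.util.HashMap;
import java.util.Map;

import org.apache.flink.table.api.DataTypes;
import org.apache.flink.table.api.Schema;
import org.apache.flink.table.catalog.CatalogTable;
import org.apache.flink.table.catalog.ObjectPath;
import org.apache.flink.table.catalog.hive.HiveCatalog;

public final class CreateTableSketch {

    // Registers a simple generic (non-Hive) table in a HiveCatalog;
    // "myhive" and "/etc/hive/conf" are placeholder values.
    static void register() throws Exception {
        HiveCatalog catalog = new HiveCatalog("myhive", "default", "/etc/hive/conf");
        catalog.open();

        Map<String, String> options = new HashMap<>();
        options.put("connector", "datagen"); // non-Hive table, so the option is kept

        CatalogTable table = CatalogTable.of(
                Schema.newBuilder().column("id", DataTypes.BIGINT()).build(),
                null,                      // no comment
                Collections.emptyList(),   // not partitioned
                options);

        // ignoreIfExists = true: an existing table is not an error
        catalog.createTable(new ObjectPath("default", "t1"), table, true);
        catalog.close();
    }
}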
Use of org.apache.flink.table.catalog.CatalogBaseTable in project flink by apache.
The class HiveCatalogTest, method testRetrieveFlinkProperties:
@Test
public void testRetrieveFlinkProperties() throws Exception {
    ObjectPath hiveObjectPath = new ObjectPath(HiveCatalog.DEFAULT_DB, "testRetrieveProperties");
    Map<String, String> options = getLegacyFileSystemConnectorOptions("/test_path");
    options.put(CONNECTOR.key(), "jdbc");
    options.put("url", "jdbc:clickhouse://host:port/testUrl1");
    options.put("flink.url", "jdbc:clickhouse://host:port/testUrl2");
    hiveCatalog.createTable(hiveObjectPath, new CatalogTableImpl(schema, options, null), false);
    CatalogBaseTable hiveTable = hiveCatalog.getTable(hiveObjectPath);
    assertThat(hiveTable.getOptions()).containsEntry("url", "jdbc:clickhouse://host:port/testUrl1");
    assertThat(hiveTable.getOptions()).containsEntry("flink.url", "jdbc:clickhouse://host:port/testUrl2");
}
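The user-supplied "flink.url" key survives the round trip because HiveCatalog stores Flink options under its own prefix in the Hive metastore parameters, so a user key that itself starts with "flink." cannot collide with the storage key for "url". A toy illustration of such a prefixing scheme; this is not Flink's actual code, and the prefix constant is an assumption:

import java.util.HashMap;
import java.util.Map;

public final class PropertyPrefixSketch {

    private static final String PREFIX = "flink."; // assumed prefix

    // Every Flink option is stored prefixed, so the user option "flink.url"
    // becomes "flink.flink.url" and is distinct from "flink.url" (for "url").
    static Map<String, String> toMetastore(Map<String, String> flinkOptions) {
        Map<String, String> params = new HashMap<>();
        flinkOptions.forEach((k, v) -> params.put(PREFIX + k, v));
        return params;
    }

    // Reading back strips exactly one prefix, restoring the original keys.
    static Map<String, String> fromMetastore(Map<String, String> params) {
        Map<String, String> options = new HashMap<>();
        params.forEach((k, v) -> {
            if (k.startsWith(PREFIX)) {
                options.put(k.substring(PREFIX.length()), v);
            }
        });
        return options;
    }
}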
Use of org.apache.flink.table.catalog.CatalogBaseTable in project flink by apache.
The class HiveCatalogTest, method testGetNoSchemaGenericTable:
@Test
public void testGetNoSchemaGenericTable() throws Exception {
    ObjectPath hiveObjectPath = new ObjectPath(HiveCatalog.DEFAULT_DB, "testGetNoSchemaGenericTable");
    Map<String, String> properties = new HashMap<>();
    properties.put(CONNECTOR.key(), "jdbc");
    hiveCatalog.createTable(
            hiveObjectPath,
            new CatalogTableImpl(TableSchema.builder().build(), properties, null),
            false);
    CatalogBaseTable catalogTable = hiveCatalog.getTable(hiveObjectPath);
    assertThat(catalogTable.getSchema()).isEqualTo(TableSchema.builder().build());
}
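CatalogTableImpl and TableSchema used above are the legacy types. A sketch of the same empty-schema table built with the newer CatalogTable.of factory instead:

import java.util.Collections;
import java.util.HashMap;
import java.util.Map;

import org.apache.flink.table.api.Schema;
import org.apache.flink.table.catalog.CatalogTable;

public final class NoSchemaTableSketch {

    // Builds a schemaless generic table equivalent to the one in the test.
    static CatalogTable noSchemaTable() {
        Map<String, String> properties = new HashMap<>();
        properties.put("connector", "jdbc");
        return CatalogTable.of(
                Schema.newBuilder().build(),  // empty, unresolved schema
                null,                         // no comment
                Collections.emptyList(),      // no partition keys
                properties);
    }
}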