Use of org.apache.flink.table.catalog.CatalogBaseTable in project flink-mirror by flink-ci.
From the class HiveCatalogTest, method testRetrieveFlinkProperties.
@Test
public void testRetrieveFlinkProperties() throws Exception {
    ObjectPath hiveObjectPath =
            new ObjectPath(HiveCatalog.DEFAULT_DB, "testRetrieveProperties");
    Map<String, String> options = getLegacyFileSystemConnectorOptions("/test_path");
    options.put(CONNECTOR.key(), "jdbc");
    options.put("url", "jdbc:clickhouse://host:port/testUrl1");
    options.put("flink.url", "jdbc:clickhouse://host:port/testUrl2");

    hiveCatalog.createTable(hiveObjectPath, new CatalogTableImpl(schema, options, null), false);

    // Both the plain option and the option whose key itself starts with
    // "flink." must survive the round trip through Hive table parameters.
    CatalogBaseTable hiveTable = hiveCatalog.getTable(hiveObjectPath);
    assertThat(hiveTable.getOptions())
            .containsEntry("url", "jdbc:clickhouse://host:port/testUrl1");
    assertThat(hiveTable.getOptions())
            .containsEntry("flink.url", "jdbc:clickhouse://host:port/testUrl2");
}
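What this verifies: HiveCatalog persists Flink options as Hive table parameters under the "flink." prefix and strips that prefix again on read, so an option whose key already starts with "flink." (here "flink.url") is simply prefixed a second time and still round-trips intact. A minimal standalone sketch of that convention; toHiveParameters and toFlinkOptions are hypothetical helper names for illustration, not HiveCatalog API:

import java.util.HashMap;
import java.util.Map;

public class FlinkPropertyPrefixSketch {
    static final String FLINK_PROPERTY_PREFIX = "flink.";

    // Store: prefix every Flink option before writing it as a Hive parameter.
    static Map<String, String> toHiveParameters(Map<String, String> options) {
        Map<String, String> parameters = new HashMap<>();
        options.forEach((k, v) -> parameters.put(FLINK_PROPERTY_PREFIX + k, v));
        return parameters;
    }

    // Read: keep only prefixed parameters and strip the prefix exactly once.
    static Map<String, String> toFlinkOptions(Map<String, String> parameters) {
        Map<String, String> options = new HashMap<>();
        parameters.forEach((k, v) -> {
            if (k.startsWith(FLINK_PROPERTY_PREFIX)) {
                options.put(k.substring(FLINK_PROPERTY_PREFIX.length()), v);
            }
        });
        return options;
    }

    public static void main(String[] args) {
        Map<String, String> options = new HashMap<>();
        options.put("url", "jdbc:clickhouse://host:port/testUrl1");
        options.put("flink.url", "jdbc:clickhouse://host:port/testUrl2");
        // "flink.url" becomes "flink.flink.url" in Hive and comes back as
        // "flink.url", so both entries survive, as the test asserts.
        System.out.println(toFlinkOptions(toHiveParameters(options)));
    }
}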
Use of org.apache.flink.table.catalog.CatalogBaseTable in project flink-mirror by flink-ci.
From the class HiveCatalogTest, method testCreateAndGetFlinkManagedTable.
@Test
public void testCreateAndGetFlinkManagedTable() throws Exception {
    CatalogTable table =
            new CatalogTableImpl(schema, Collections.emptyMap(), "Flink managed table");
    hiveCatalog.createTable(tablePath, table, false);

    // A table created without a connector option is marked as Flink-managed:
    // the managed-table identifier is persisted under the prefixed connector key.
    Table hiveTable = hiveCatalog.getHiveTable(tablePath);
    assertThat(hiveTable.getParameters())
            .containsEntry(
                    FLINK_PROPERTY_PREFIX + CONNECTOR.key(),
                    ManagedTableFactory.DEFAULT_IDENTIFIER);

    // Reading the table back strips the marker, restoring the empty options map.
    CatalogBaseTable retrievedTable = hiveCatalog.instantiateCatalogTable(hiveTable);
    assertThat(retrievedTable.getOptions()).isEmpty();
}
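The managed-table marker can also be checked directly on a raw Hive table. A hedged sketch under the same fixtures as the test above; isFlinkManagedTable is a hypothetical helper, not HiveCatalog API:

import org.apache.flink.table.factories.ManagedTableFactory;
import org.apache.hadoop.hive.metastore.api.Table;

public class ManagedTableCheckSketch {
    // "flink.connector" is FLINK_PROPERTY_PREFIX + CONNECTOR.key(),
    // exactly the parameter asserted in the test above.
    static boolean isFlinkManagedTable(Table hiveTable) {
        String connector = hiveTable.getParameters().get("flink.connector");
        return ManagedTableFactory.DEFAULT_IDENTIFIER.equals(connector);
    }
}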
Use of org.apache.flink.table.catalog.CatalogBaseTable in project flink-mirror by flink-ci.
From the class TableEnvHiveConnectorITCase, method testNotNullConstraints.
@Test
public void testNotNullConstraints() throws Exception {
    Assume.assumeTrue(HiveVersionTestUtil.HIVE_310_OR_LATER);
    TableEnvironment tableEnv = getTableEnvWithHiveCatalog();
    tableEnv.executeSql("create database db1");
    try {
        tableEnv.executeSql(
                "create table db1.tbl (x int,y bigint not null enable rely,z string not null enable norely)");
        CatalogBaseTable catalogTable = hiveCatalog.getTable(new ObjectPath("db1", "tbl"));
        TableSchema tableSchema = catalogTable.getSchema();
        assertTrue(
                "By default columns should be nullable",
                tableSchema.getFieldDataTypes()[0].getLogicalType().isNullable());
        assertFalse(
                "NOT NULL columns should be reflected in table schema",
                tableSchema.getFieldDataTypes()[1].getLogicalType().isNullable());
        assertTrue(
                "NOT NULL NORELY columns should be considered nullable",
                tableSchema.getFieldDataTypes()[2].getLogicalType().isNullable());
    } finally {
        tableEnv.executeSql("drop database db1 cascade");
    }
}
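The same nullability information is available programmatically on any retrieved table. A short sketch, assuming the hiveCatalog fixture and the db1.tbl table created above, run inside a method that declares throws Exception like the tests here:

import org.apache.flink.table.api.TableSchema;
import org.apache.flink.table.catalog.CatalogBaseTable;
import org.apache.flink.table.catalog.ObjectPath;
import org.apache.flink.table.types.DataType;

CatalogBaseTable table = hiveCatalog.getTable(new ObjectPath("db1", "tbl"));
TableSchema schema = table.getSchema();
String[] names = schema.getFieldNames();
DataType[] types = schema.getFieldDataTypes();
for (int i = 0; i < names.length; i++) {
    // Only columns declared NOT NULL ... RELY come back as non-nullable.
    System.out.printf("%s nullable=%b%n", names[i], types[i].getLogicalType().isNullable());
}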
Use of org.apache.flink.table.catalog.CatalogBaseTable in project flink-mirror by flink-ci.
From the class TableEnvHiveConnectorITCase, method testPKConstraint.
@Test
public void testPKConstraint() throws Exception {
    // While PK constraints are supported since Hive 2.1.0, the constraints
    // cannot be RELY in 2.x versions. So let's only test for 3.x.
    Assume.assumeTrue(HiveVersionTestUtil.HIVE_310_OR_LATER);
    TableEnvironment tableEnv = getTableEnvWithHiveCatalog();
    tableEnv.executeSql("create database db1");
    try {
        // test rely PK constraints
        tableEnv.executeSql(
                "create table db1.tbl1 (x tinyint,y smallint,z int, primary key (x,z) disable novalidate rely)");
        CatalogBaseTable catalogTable = hiveCatalog.getTable(new ObjectPath("db1", "tbl1"));
        TableSchema tableSchema = catalogTable.getSchema();
        assertTrue(tableSchema.getPrimaryKey().isPresent());
        UniqueConstraint pk = tableSchema.getPrimaryKey().get();
        assertEquals(2, pk.getColumns().size());
        assertTrue(pk.getColumns().containsAll(Arrays.asList("x", "z")));
        // test norely PK constraints
        tableEnv.executeSql(
                "create table db1.tbl2 (x tinyint,y smallint, primary key (x) disable norely)");
        catalogTable = hiveCatalog.getTable(new ObjectPath("db1", "tbl2"));
        tableSchema = catalogTable.getSchema();
        assertFalse(tableSchema.getPrimaryKey().isPresent());
        // test table w/o PK
        tableEnv.executeSql("create table db1.tbl3 (x tinyint)");
        catalogTable = hiveCatalog.getTable(new ObjectPath("db1", "tbl3"));
        tableSchema = catalogTable.getSchema();
        assertFalse(tableSchema.getPrimaryKey().isPresent());
    } finally {
        tableEnv.executeSql("drop database db1 cascade");
    }
}
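Consuming the constraint is equally brief: UniqueConstraint exposes the key's name and its columns. A sketch against the db1.tbl1 table from the test above, again assuming the hiveCatalog fixture inside a method that declares throws Exception:

import org.apache.flink.table.api.TableSchema;
import org.apache.flink.table.api.constraints.UniqueConstraint;
import org.apache.flink.table.catalog.ObjectPath;

TableSchema schema = hiveCatalog.getTable(new ObjectPath("db1", "tbl1")).getSchema();
// Present only because the PK was declared RELY; NORELY keys are dropped.
schema.getPrimaryKey().ifPresent(pk ->
        System.out.println("PK " + pk.getName() + " covers " + String.join(", ", pk.getColumns())));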
Use of org.apache.flink.table.catalog.CatalogBaseTable in project flink-mirror by flink-ci.
From the class TableEnvironmentImpl, method registerTableSourceInternal.
@Override
public void registerTableSourceInternal(String name, TableSource<?> tableSource) {
    validateTableSource(tableSource);
    ObjectIdentifier objectIdentifier =
            catalogManager.qualifyIdentifier(UnresolvedIdentifier.of(name));
    Optional<CatalogBaseTable> table = getTemporaryTable(objectIdentifier);

    if (table.isPresent()) {
        if (table.get() instanceof ConnectorCatalogTable<?, ?>) {
            ConnectorCatalogTable<?, ?> sourceSinkTable =
                    (ConnectorCatalogTable<?, ?>) table.get();
            if (sourceSinkTable.getTableSource().isPresent()) {
                // a source is already registered under this name
                throw new ValidationException(
                        String.format(
                                "Table '%s' already exists. Please choose a different name.", name));
            } else {
                // wrapper contains only sink (not source): merge the new source
                // into it by re-registering a combined source-and-sink table
                ConnectorCatalogTable sourceAndSink =
                        ConnectorCatalogTable.sourceAndSink(
                                tableSource, sourceSinkTable.getTableSink().get(), !IS_STREAM_TABLE);
                catalogManager.dropTemporaryTable(objectIdentifier, false);
                catalogManager.createTemporaryTable(sourceAndSink, objectIdentifier, false);
            }
        } else {
            // the name is taken by some other kind of temporary table
            throw new ValidationException(
                    String.format("Table '%s' already exists. Please choose a different name.", name));
        }
    } else {
        // no table under this name yet: register a source-only wrapper
        ConnectorCatalogTable source = ConnectorCatalogTable.source(tableSource, !IS_STREAM_TABLE);
        catalogManager.createTemporaryTable(source, objectIdentifier, false);
    }
}
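The sink side mirrors this logic: a sink registered under a name that already holds a source-only wrapper is merged into one combined ConnectorCatalogTable via sourceAndSink(...). A hedged sketch of that symmetric path, modeled on the method above rather than copied from Flink:

public void registerTableSinkInternal(String name, TableSink<?> tableSink) {
    ObjectIdentifier id = catalogManager.qualifyIdentifier(UnresolvedIdentifier.of(name));
    Optional<CatalogBaseTable> table = getTemporaryTable(id);
    if (table.isPresent()) {
        if (table.get() instanceof ConnectorCatalogTable<?, ?>) {
            ConnectorCatalogTable<?, ?> existing = (ConnectorCatalogTable<?, ?>) table.get();
            if (existing.getTableSink().isPresent()) {
                throw new ValidationException(
                        String.format("Table '%s' already exists. Please choose a different name.", name));
            }
            // wrapper contains only a source: merge in the new sink
            ConnectorCatalogTable sourceAndSink =
                    ConnectorCatalogTable.sourceAndSink(
                            existing.getTableSource().get(), tableSink, !IS_STREAM_TABLE);
            catalogManager.dropTemporaryTable(id, false);
            catalogManager.createTemporaryTable(sourceAndSink, id, false);
        } else {
            throw new ValidationException(
                    String.format("Table '%s' already exists. Please choose a different name.", name));
        }
    } else {
        catalogManager.createTemporaryTable(
                ConnectorCatalogTable.sink(tableSink, !IS_STREAM_TABLE), id, false);
    }
}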