Example usage of org.apache.flink.table.catalog.CatalogTable in the Apache Flink project.
From the class HiveLookupJoinITCase, method testPartitionFetcherAndReader.
@Test
public void testPartitionFetcherAndReader() throws Exception {
    // Populate the partitioned table via a dynamic-partition insert in batch mode.
    TableEnvironment batchTableEnv = HiveTestUtils.createTableEnvInBatchMode(SqlDialect.HIVE);
    batchTableEnv.registerCatalog(hiveCatalog.getName(), hiveCatalog);
    batchTableEnv.useCatalog(hiveCatalog.getName());
    batchTableEnv
            .executeSql(
                    "insert overwrite partition_table values "
                            + "(1,'a',08,2019,'08','01'),"
                            + "(1,'a',10,2020,'08','31'),"
                            + "(2,'a',21,2020,'08','31'),"
                            + "(2,'b',22,2020,'08','31'),"
                            + "(3,'c',33,2020,'09','31')")
            .await();

    FileSystemLookupFunction<HiveTablePartition> lookupFunction =
            getLookupFunction("partition_table");
    lookupFunction.open(null);

    // Fetch partitions; the assertion below shows the fetcher is expected to
    // return exactly one partition (the latest, per the comment in the original).
    PartitionFetcher<HiveTablePartition> partitionFetcher =
            lookupFunction.getPartitionFetcher();
    PartitionFetcher.Context<HiveTablePartition> fetcherContext =
            lookupFunction.getFetcherContext();
    List<HiveTablePartition> fetchedPartitions = partitionFetcher.fetch(fetcherContext);
    // fetch latest partition by partition-name
    assertEquals(1, fetchedPartitions.size());

    PartitionReader<HiveTablePartition, RowData> partitionReader =
            lookupFunction.getPartitionReader();
    partitionReader.open(fetchedPartitions);

    ObjectIdentifier identifier =
            ObjectIdentifier.of(hiveCatalog.getName(), "default", "partition_table");
    CatalogTable hiveCatalogTable =
            (CatalogTable) hiveCatalog.getTable(identifier.toObjectPath());
    GenericRowData reuseRow =
            new GenericRowData(hiveCatalogTable.getSchema().getFieldCount());
    TypeSerializer<RowData> rowSerializer =
            InternalSerializers.create(
                    hiveCatalogTable.getSchema().toRowDataType().getLogicalType());

    // Copy every row out of the reader: read(reuseRow) may hand back the same
    // reused instance, so each result is deep-copied via the serializer.
    List<RowData> collected = new ArrayList<>();
    for (RowData next = partitionReader.read(reuseRow);
            next != null;
            next = partitionReader.read(reuseRow)) {
        collected.add(rowSerializer.copy(next));
    }

    collected.sort(Comparator.comparingInt(row -> row.getInt(0)));
    assertEquals("[+I(3,c,33,2020,09,31)]", collected.toString());
}
Example usage of org.apache.flink.table.catalog.CatalogTable in the Apache Flink project.
From the class HiveCatalogTest, method testAlterHiveTableToFlinkManagedTable.
@Test
public void testAlterHiveTableToFlinkManagedTable() throws Exception {
    // Register a Hive-native table (legacy filesystem options plus the Hive connector id).
    Map<String, String> hiveOptions = getLegacyFileSystemConnectorOptions("/test_path");
    hiveOptions.put(FactoryUtil.CONNECTOR.key(), SqlCreateHiveTable.IDENTIFIER);
    CatalogTable hiveTable = new CatalogTableImpl(schema, hiveOptions, "Hive table");
    hiveCatalog.createTable(tablePath, hiveTable, false);

    // A table with empty options is treated as a Flink managed table; altering an
    // existing HIVE_TABLE into it must be rejected with an IllegalArgumentException.
    CatalogTable managedTable =
            new CatalogTableImpl(schema, Collections.emptyMap(), "Flink managed table");
    assertThatThrownBy(() -> hiveCatalog.alterTable(tablePath, managedTable, false))
            .isInstanceOf(IllegalArgumentException.class)
            .hasMessageContaining(
                    "Changing catalog table type is not allowed. "
                            + "Existing table type is 'HIVE_TABLE', but new table type is 'FLINK_MANAGED_TABLE'");
}
Example usage of org.apache.flink.table.catalog.CatalogTable in the Apache Flink project.
From the class HiveCatalogTest, method testAlterFlinkNonManagedTableToFlinkManagedTable.
@Test
public void testAlterFlinkNonManagedTableToFlinkManagedTable() throws Exception {
    // Create a non-managed Flink table backed by the datagen connector.
    Map<String, String> datagenOptions =
            Collections.singletonMap(
                    FactoryUtil.CONNECTOR.key(), DataGenTableSourceFactory.IDENTIFIER);
    hiveCatalog.createTable(
            tablePath,
            new CatalogTableImpl(schema, datagenOptions, "Flink non-managed table"),
            false);

    // Dropping the connector option would turn it into a managed table;
    // such a table-type change must be rejected by alterTable.
    CatalogTable managedTable =
            new CatalogTableImpl(schema, Collections.emptyMap(), "Flink managed table");
    assertThatThrownBy(() -> hiveCatalog.alterTable(tablePath, managedTable, false))
            .isInstanceOf(IllegalArgumentException.class)
            .hasMessageContaining(
                    "Changing catalog table type is not allowed. "
                            + "Existing table type is 'FLINK_NON_MANAGED_TABLE', but new table type is 'FLINK_MANAGED_TABLE'");
}
Example usage of org.apache.flink.table.catalog.CatalogTable in the Apache Flink project.
From the class HiveCatalogTest, method testAlterFlinkManagedTableToFlinkManagedTable.
@Test
public void testAlterFlinkManagedTableToFlinkManagedTable() throws Exception {
    // NOTE(review): despite the method name, this test exercises the
    // managed -> NON-managed direction (see the asserted message below) —
    // confirm whether the name should read ...ToFlinkNonManagedTable.
    // Start from a managed table: empty options, no connector key.
    hiveCatalog.createTable(
            tablePath,
            new CatalogTableImpl(schema, Collections.emptyMap(), "Flink managed table"),
            false);

    // Adding a connector option would change the table type; must be rejected.
    Map<String, String> datagenOptions =
            Collections.singletonMap(
                    FactoryUtil.CONNECTOR.key(), DataGenTableSourceFactory.IDENTIFIER);
    CatalogTable nonManagedTable =
            new CatalogTableImpl(schema, datagenOptions, "Flink non-managed table");
    assertThatThrownBy(() -> hiveCatalog.alterTable(tablePath, nonManagedTable, false))
            .isInstanceOf(IllegalArgumentException.class)
            .hasMessageContaining(
                    "Changing catalog table type is not allowed. "
                            + "Existing table type is 'FLINK_MANAGED_TABLE', but new table type is 'FLINK_NON_MANAGED_TABLE'");
}
Example usage of org.apache.flink.table.catalog.CatalogTable in the Apache Flink project.
From the class HiveCatalogTest, method testCreateAndGetFlinkManagedTable.
@Test
public void testCreateAndGetFlinkManagedTable() throws Exception {
    // A table created with no options is persisted as a Flink managed table.
    CatalogTable managedTable =
            new CatalogTableImpl(schema, Collections.emptyMap(), "Flink managed table");
    hiveCatalog.createTable(tablePath, managedTable, false);

    // The stored Hive table must carry the managed-table identifier as a
    // Flink-prefixed property.
    Table storedHiveTable = hiveCatalog.getHiveTable(tablePath);
    assertThat(storedHiveTable.getParameters())
            .containsEntry(
                    FLINK_PROPERTY_PREFIX + CONNECTOR.key(),
                    ManagedTableFactory.DEFAULT_IDENTIFIER);

    // Round-tripping back to a CatalogBaseTable must not leak the marker into options.
    CatalogBaseTable roundTripped = hiveCatalog.instantiateCatalogTable(storedHiveTable);
    assertThat(roundTripped.getOptions()).isEmpty();
}
Aggregations