Use of org.apache.flink.table.catalog.CatalogTableImpl in project flink by apache.
From the class HiveCatalogTest, method testCreateHiveTable:
@Test
public void testCreateHiveTable() {
    Map<String, String> options = getLegacyFileSystemConnectorOptions("/test_path");
    options.put(FactoryUtil.CONNECTOR.key(), SqlCreateHiveTable.IDENTIFIER);
    Table hiveTable =
            HiveTableUtil.instantiateHiveTable(
                    new ObjectPath("test", "test"),
                    new CatalogTableImpl(schema, options, null),
                    HiveTestUtils.createHiveConf(),
                    false);
    Map<String, String> prop = hiveTable.getParameters();
    assertThat(HiveCatalog.isHiveTable(prop)).isTrue();
    assertThat(prop.keySet())
            .noneMatch(k -> k.startsWith(CatalogPropertiesUtil.FLINK_PROPERTY_PREFIX));
}
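The test above relies on the three-argument CatalogTableImpl constructor (schema, options, comment). The following is a minimal standalone sketch of that constructor, not taken from the Flink test suite; the field name and option values are illustrative assumptions.

import org.apache.flink.table.api.DataTypes;
import org.apache.flink.table.api.TableSchema;
import org.apache.flink.table.catalog.CatalogTableImpl;

import java.util.HashMap;
import java.util.Map;

public class CatalogTableImplSketch {
    public static void main(String[] args) {
        // Schema with a single INT column; the column name is illustrative.
        TableSchema schema = TableSchema.builder().field("id", DataTypes.INT()).build();
        Map<String, String> options = new HashMap<>();
        // The test sets this key via FactoryUtil.CONNECTOR.key(), whose literal value is "connector".
        options.put("connector", "hive");
        // The comment argument may be null, exactly as in the test above.
        CatalogTableImpl table = new CatalogTableImpl(schema, options, null);
        System.out.println(table.getSchema());
    }
}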
Use of org.apache.flink.table.catalog.CatalogTableImpl in project flink by apache.
From the class HiveSourceITCase, method testRegularRead:
@Test
public void testRegularRead() throws Exception {
    // test non-partitioned table
    ObjectPath tablePath = new ObjectPath("default", "tbl1");
    Map<String, String> tableOptions = new HashMap<>();
    tableOptions.put(CONNECTOR.key(), IDENTIFIER);
    hiveCatalog.createTable(
            tablePath,
            new CatalogTableImpl(TableSchema.builder().field("i", DataTypes.INT()).build(), tableOptions, null),
            false);
    HiveTestUtils.createTextTableInserter(hiveCatalog, tablePath.getDatabaseName(), tablePath.getObjectName())
            .addRow(new Object[] {1})
            .addRow(new Object[] {2})
            .commit();
    StreamExecutionEnvironment streamEnv = StreamExecutionEnvironment.getExecutionEnvironment();
    streamEnv.setParallelism(1);
    HiveSource<RowData> hiveSource =
            new HiveSourceBuilder(
                            new JobConf(hiveCatalog.getHiveConf()), new Configuration(),
                            HiveShimLoader.getHiveVersion(), tablePath.getDatabaseName(),
                            tablePath.getObjectName(), Collections.emptyMap())
                    .buildWithDefaultBulkFormat();
    List<RowData> results =
            CollectionUtil.iteratorToList(
                    streamEnv.fromSource(hiveSource, WatermarkStrategy.noWatermarks(), "HiveSource-tbl1")
                            .executeAndCollect());
    assertEquals(2, results.size());
    assertEquals(1, results.get(0).getInt(0));
    assertEquals(2, results.get(1).getInt(0));
    hiveCatalog.dropTable(tablePath, false);
    // test partitioned table
    tablePath = new ObjectPath("default", "tbl2");
    hiveCatalog.createTable(
            tablePath,
            new CatalogTableImpl(
                    TableSchema.builder().field("i", DataTypes.INT()).field("p", DataTypes.STRING()).build(),
                    Collections.singletonList("p"), tableOptions, null),
            false);
    HiveTestUtils.createTextTableInserter(hiveCatalog, tablePath.getDatabaseName(), tablePath.getObjectName())
            .addRow(new Object[] {1})
            .addRow(new Object[] {2})
            .commit("p='a'");
    hiveSource =
            new HiveSourceBuilder(
                            new JobConf(hiveCatalog.getHiveConf()), new Configuration(),
                            HiveShimLoader.getHiveVersion(), tablePath.getDatabaseName(),
                            tablePath.getObjectName(), Collections.emptyMap())
                    .setLimit(1L)
                    .buildWithDefaultBulkFormat();
    results =
            CollectionUtil.iteratorToList(
                    streamEnv.fromSource(hiveSource, WatermarkStrategy.noWatermarks(), "HiveSource-tbl2")
                            .executeAndCollect());
    assertEquals(1, results.size());
    assertEquals(1, results.get(0).getInt(0));
    assertEquals("a", results.get(0).getString(1).toString());
    HiveTestUtils.createTextTableInserter(hiveCatalog, tablePath.getDatabaseName(), tablePath.getObjectName())
            .addRow(new Object[] {3})
            .commit("p='b'");
    LinkedHashMap<String, String> spec = new LinkedHashMap<>();
    spec.put("p", "b");
    hiveSource =
            new HiveSourceBuilder(
                            new JobConf(hiveCatalog.getHiveConf()), new Configuration(), null,
                            tablePath.getDatabaseName(), tablePath.getObjectName(), Collections.emptyMap())
                    .setPartitions(
                            Collections.singletonList(
                                    HiveTablePartition.ofPartition(
                                            hiveCatalog.getHiveConf(), hiveCatalog.getHiveVersion(),
                                            tablePath.getDatabaseName(), tablePath.getObjectName(), spec)))
                    .buildWithDefaultBulkFormat();
    results =
            CollectionUtil.iteratorToList(
                    streamEnv.fromSource(hiveSource, WatermarkStrategy.noWatermarks(), "HiveSource-tbl2")
                            .executeAndCollect());
    assertEquals(1, results.size());
    assertEquals(3, results.get(0).getInt(0));
    assertEquals("b", results.get(0).getString(1).toString());
    hiveCatalog.dropTable(tablePath, false);
}
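The partitioned table (tbl2) above uses the four-argument CatalogTableImpl constructor, which inserts a partition-key list between the schema and the options. A hedged, self-contained sketch of that variant follows; the connector value is an illustrative assumption.

import org.apache.flink.table.api.DataTypes;
import org.apache.flink.table.api.TableSchema;
import org.apache.flink.table.catalog.CatalogTableImpl;

import java.util.Collections;

public class PartitionedCatalogTableSketch {
    public static void main(String[] args) {
        TableSchema schema = TableSchema.builder()
                .field("i", DataTypes.INT())
                .field("p", DataTypes.STRING()) // partition column, as in tbl2 above
                .build();
        CatalogTableImpl partitioned = new CatalogTableImpl(
                schema,
                Collections.singletonList("p"), // partition keys must name schema columns
                Collections.singletonMap("connector", "hive"),
                null);
        System.out.println(partitioned.getPartitionKeys());
    }
}

Note also that the partition spec in the test is a LinkedHashMap rather than a plain HashMap: with multiple partition keys, the key order determines the k1=v1/k2=v2 directory layout Hive expects.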
Use of org.apache.flink.table.catalog.CatalogTableImpl in project flink by apache.
From the class HiveTableFactoryTest, method testGenericTable:
@Test
public void testGenericTable() throws Exception {
    final TableSchema schema =
            TableSchema.builder().field("name", DataTypes.STRING()).field("age", DataTypes.INT()).build();
    catalog.createDatabase("mydb", new CatalogDatabaseImpl(new HashMap<>(), ""), true);
    final Map<String, String> options =
            Collections.singletonMap(FactoryUtil.CONNECTOR.key(), "COLLECTION");
    final CatalogTable table = new CatalogTableImpl(schema, options, "csv table");
    catalog.createTable(new ObjectPath("mydb", "mytable"), table, true);
    final Optional<TableFactory> tableFactoryOpt = catalog.getTableFactory();
    assertTrue(tableFactoryOpt.isPresent());
    final HiveTableFactory tableFactory = (HiveTableFactory) tableFactoryOpt.get();
    final TableSource tableSource =
            tableFactory.createTableSource(
                    new TableSourceFactoryContextImpl(
                            ObjectIdentifier.of("mycatalog", "mydb", "mytable"), table, new Configuration(), false));
    assertTrue(tableSource instanceof StreamTableSource);
    final TableSink tableSink =
            tableFactory.createTableSink(
                    new TableSinkFactoryContextImpl(
                            ObjectIdentifier.of("mycatalog", "mydb", "mytable"), table, new Configuration(), true, false));
    assertTrue(tableSink instanceof StreamTableSink);
}
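What routes this table down the legacy generic path is the connector option: "COLLECTION" here versus SqlCreateHiveTable.IDENTIFIER in the Hive tests. A hedged sketch of that dispatch, assuming the literal "connector" key (FactoryUtil.CONNECTOR.key()) and the "hive" identifier used by these tests:

import java.util.Collections;
import java.util.Map;

public class ConnectorDispatchSketch {
    // Assumption: FactoryUtil.CONNECTOR.key() is "connector" and
    // SqlCreateHiveTable.IDENTIFIER is "hive", matching the tests above.
    static boolean isHiveTable(Map<String, String> options) {
        return "hive".equals(options.get("connector"));
    }

    public static void main(String[] args) {
        System.out.println(isHiveTable(Collections.singletonMap("connector", "COLLECTION"))); // false
        System.out.println(isHiveTable(Collections.singletonMap("connector", "hive")));       // true
    }
}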
Use of org.apache.flink.table.catalog.CatalogTableImpl in project flink by apache.
From the class HiveTableFactoryTest, method testHiveTable:
@Test
public void testHiveTable() throws Exception {
    final ResolvedSchema schema =
            ResolvedSchema.of(Column.physical("name", DataTypes.STRING()), Column.physical("age", DataTypes.INT()));
    catalog.createDatabase("mydb", new CatalogDatabaseImpl(new HashMap<>(), ""), true);
    final Map<String, String> options =
            Collections.singletonMap(FactoryUtil.CONNECTOR.key(), SqlCreateHiveTable.IDENTIFIER);
    final CatalogTable table =
            new CatalogTableImpl(TableSchema.fromResolvedSchema(schema), options, "hive table");
    catalog.createTable(new ObjectPath("mydb", "mytable"), table, true);
    final DynamicTableSource tableSource =
            FactoryUtil.createDynamicTableSource(
                    (DynamicTableSourceFactory) catalog.getFactory().orElseThrow(IllegalStateException::new),
                    ObjectIdentifier.of("mycatalog", "mydb", "mytable"),
                    new ResolvedCatalogTable(table, schema),
                    new Configuration(), Thread.currentThread().getContextClassLoader(), false);
    assertTrue(tableSource instanceof HiveTableSource);
    final DynamicTableSink tableSink =
            FactoryUtil.createDynamicTableSink(
                    (DynamicTableSinkFactory) catalog.getFactory().orElseThrow(IllegalStateException::new),
                    ObjectIdentifier.of("mycatalog", "mydb", "mytable"),
                    new ResolvedCatalogTable(table, schema),
                    new Configuration(), Thread.currentThread().getContextClassLoader(), false);
    assertTrue(tableSink instanceof HiveTableSink);
}
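This test mixes the newer ResolvedSchema API with the legacy TableSchema that CatalogTableImpl still requires, bridging via TableSchema.fromResolvedSchema. A minimal sketch of that round trip, using the same column names as the test:

import org.apache.flink.table.api.DataTypes;
import org.apache.flink.table.api.TableSchema;
import org.apache.flink.table.catalog.Column;
import org.apache.flink.table.catalog.ResolvedSchema;

public class ResolvedSchemaBridgeSketch {
    public static void main(String[] args) {
        // Build the resolved (new-style) schema first...
        ResolvedSchema resolved = ResolvedSchema.of(
                Column.physical("name", DataTypes.STRING()),
                Column.physical("age", DataTypes.INT()));
        // ...then convert to the legacy TableSchema for the CatalogTableImpl constructor.
        TableSchema legacy = TableSchema.fromResolvedSchema(resolved);
        System.out.println(legacy);
    }
}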
Use of org.apache.flink.table.catalog.CatalogTableImpl in project flink by apache.
From the class HiveCatalogGenericMetadataTest, method testGenericTableSchema:
// ------ tables ------
@Test
public void testGenericTableSchema() throws Exception {
    catalog.createDatabase(db1, createDb(), false);
    TableSchema tableSchema =
            TableSchema.builder()
                    .fields(
                            new String[] {"col1", "col2", "col3"},
                            new DataType[] {DataTypes.TIMESTAMP(3), DataTypes.TIMESTAMP(6), DataTypes.TIMESTAMP(9)})
                    .watermark("col3", "col3", DataTypes.TIMESTAMP(9))
                    .build();
    ObjectPath tablePath = new ObjectPath(db1, "generic_table");
    try {
        catalog.createTable(
                tablePath,
                new CatalogTableImpl(tableSchema, getBatchTableProperties(), TEST_COMMENT),
                false);
        assertEquals(tableSchema, catalog.getTable(tablePath).getSchema());
    } finally {
        catalog.dropTable(tablePath, true);
    }
}
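The watermark declared above is trivial: the watermark expression is the column itself. For contrast, here is an illustrative sketch (not from the test) of the more typical bounded-out-of-orderness form the same builder method accepts:

import org.apache.flink.table.api.DataTypes;
import org.apache.flink.table.api.TableSchema;

public class WatermarkSchemaSketch {
    public static void main(String[] args) {
        // Illustrative: tolerate five seconds of out-of-orderness on "ts".
        TableSchema schema = TableSchema.builder()
                .field("ts", DataTypes.TIMESTAMP(3))
                .watermark("ts", "ts - INTERVAL '5' SECOND", DataTypes.TIMESTAMP(3))
                .build();
        System.out.println(schema);
    }
}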