Use of org.apache.flink.table.catalog.CatalogTable in project flink by apache.
From the class HiveCatalogTest, the method testAlterFlinkManagedTableToHiveTable:
@Test
public void testAlterFlinkManagedTableToHiveTable() throws Exception {
    Map<String, String> originOptions = Collections.emptyMap();
    CatalogTable originTable =
            new CatalogTableImpl(schema, originOptions, "Flink managed table");
    hiveCatalog.createTable(tablePath, originTable, false);

    Map<String, String> newOptions = getLegacyFileSystemConnectorOptions("/test_path");
    newOptions.put(FactoryUtil.CONNECTOR.key(), SqlCreateHiveTable.IDENTIFIER);
    CatalogTable newTable = new CatalogTableImpl(schema, newOptions, "Hive table");

    assertThatThrownBy(() -> hiveCatalog.alterTable(tablePath, newTable, false))
            .isInstanceOf(IllegalArgumentException.class)
            .hasMessageContaining(
                    "Changing catalog table type is not allowed. "
                            + "Existing table type is 'FLINK_MANAGED_TABLE', "
                            + "but new table type is 'HIVE_TABLE'");
}
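The test hinges on how HiveCatalog infers a table's type from its options: a descriptor with no 'connector' option is treated as a Flink managed table, while 'connector' set to SqlCreateHiveTable.IDENTIFIER marks a native Hive table. Below is a minimal sketch of building both descriptors, assuming a Flink version with managed-table support (the same 1.15/1.16-era API, including the deprecated CatalogTableImpl, that these tests use):

import java.util.Collections;
import java.util.HashMap;
import java.util.Map;

import org.apache.flink.table.api.DataTypes;
import org.apache.flink.table.api.TableSchema;
import org.apache.flink.table.catalog.CatalogTable;
import org.apache.flink.table.catalog.CatalogTableImpl;

public class TableTypeSketch {

    public static void main(String[] args) {
        TableSchema schema =
                TableSchema.builder()
                        .field("name", DataTypes.STRING())
                        .field("age", DataTypes.INT())
                        .build();

        // No 'connector' option: HiveCatalog stores this as a Flink managed table.
        CatalogTable managed =
                new CatalogTableImpl(schema, Collections.emptyMap(), "Flink managed table");

        // 'connector' = 'hive' (the value of SqlCreateHiveTable.IDENTIFIER):
        // HiveCatalog stores this as a native Hive table.
        Map<String, String> hiveOptions = new HashMap<>();
        hiveOptions.put("connector", "hive");
        CatalogTable hive = new CatalogTableImpl(schema, hiveOptions, "Hive table");

        // Swapping one descriptor for the other via alterTable changes the table
        // type, which is exactly what the assertion above expects to be rejected.
    }
}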
Use of org.apache.flink.table.catalog.CatalogTable in project flink by apache.
From the class HiveTableFactoryTest, the method testGenericTable:
@Test
public void testGenericTable() throws Exception {
    final TableSchema schema =
            TableSchema.builder()
                    .field("name", DataTypes.STRING())
                    .field("age", DataTypes.INT())
                    .build();
    catalog.createDatabase("mydb", new CatalogDatabaseImpl(new HashMap<>(), ""), true);

    final Map<String, String> options =
            Collections.singletonMap(FactoryUtil.CONNECTOR.key(), "COLLECTION");
    final CatalogTable table = new CatalogTableImpl(schema, options, "csv table");
    catalog.createTable(new ObjectPath("mydb", "mytable"), table, true);

    final Optional<TableFactory> tableFactoryOpt = catalog.getTableFactory();
    assertTrue(tableFactoryOpt.isPresent());
    final HiveTableFactory tableFactory = (HiveTableFactory) tableFactoryOpt.get();

    final TableSource tableSource =
            tableFactory.createTableSource(
                    new TableSourceFactoryContextImpl(
                            ObjectIdentifier.of("mycatalog", "mydb", "mytable"),
                            table,
                            new Configuration(),
                            false));
    assertTrue(tableSource instanceof StreamTableSource);

    final TableSink tableSink =
            tableFactory.createTableSink(
                    new TableSinkFactoryContextImpl(
                            ObjectIdentifier.of("mycatalog", "mydb", "mytable"),
                            table,
                            new Configuration(),
                            true,
                            false));
    assertTrue(tableSink instanceof StreamTableSink);
}
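This test exercises the legacy TableFactory path (TableSource/TableSink) rather than the newer DynamicTableSource/DynamicTableSink API shown in the next example. The same generic table could also be declared through DDL. A sketch of that equivalent follows; note that 'COLLECTION' is a test-only connector from Flink's test modules, so this is illustrative rather than something to run against a production distribution:

import org.apache.flink.table.api.EnvironmentSettings;
import org.apache.flink.table.api.TableEnvironment;

public class GenericTableDdlSketch {

    public static void main(String[] args) {
        TableEnvironment tEnv =
                TableEnvironment.create(EnvironmentSettings.newInstance().inBatchMode().build());

        // DDL equivalent of the CatalogTableImpl built in the test: the schema is
        // stored as catalog metadata and the 'connector' option selects the factory.
        tEnv.executeSql(
                "CREATE TABLE mytable (name STRING, age INT) "
                        + "WITH ('connector' = 'COLLECTION')");
    }
}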
Use of org.apache.flink.table.catalog.CatalogTable in project flink by apache.
From the class HiveTableFactoryTest, the method testHiveTable:
@Test
public void testHiveTable() throws Exception {
    final ResolvedSchema schema =
            ResolvedSchema.of(
                    Column.physical("name", DataTypes.STRING()),
                    Column.physical("age", DataTypes.INT()));
    catalog.createDatabase("mydb", new CatalogDatabaseImpl(new HashMap<>(), ""), true);

    final Map<String, String> options =
            Collections.singletonMap(FactoryUtil.CONNECTOR.key(), SqlCreateHiveTable.IDENTIFIER);
    final CatalogTable table =
            new CatalogTableImpl(TableSchema.fromResolvedSchema(schema), options, "hive table");
    catalog.createTable(new ObjectPath("mydb", "mytable"), table, true);

    final DynamicTableSource tableSource =
            FactoryUtil.createDynamicTableSource(
                    (DynamicTableSourceFactory)
                            catalog.getFactory().orElseThrow(IllegalStateException::new),
                    ObjectIdentifier.of("mycatalog", "mydb", "mytable"),
                    new ResolvedCatalogTable(table, schema),
                    new Configuration(),
                    Thread.currentThread().getContextClassLoader(),
                    false);
    assertTrue(tableSource instanceof HiveTableSource);

    final DynamicTableSink tableSink =
            FactoryUtil.createDynamicTableSink(
                    (DynamicTableSinkFactory)
                            catalog.getFactory().orElseThrow(IllegalStateException::new),
                    ObjectIdentifier.of("mycatalog", "mydb", "mytable"),
                    new ResolvedCatalogTable(table, schema),
                    new Configuration(),
                    Thread.currentThread().getContextClassLoader(),
                    false);
    assertTrue(tableSink instanceof HiveTableSink);
}
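Note the unresolved/resolved split: FactoryUtil works on a ResolvedCatalogTable, which pairs the raw CatalogTable with its ResolvedSchema. CatalogTableImpl itself is deprecated in recent Flink releases; a sketch of the non-deprecated way to build the same descriptor, assuming CatalogTable.of is available in your Flink version:

import java.util.Collections;
import java.util.Map;

import org.apache.flink.table.api.Schema;
import org.apache.flink.table.catalog.CatalogTable;

public class CatalogTableOfSketch {

    public static void main(String[] args) {
        Map<String, String> options = Collections.singletonMap("connector", "hive");

        CatalogTable table =
                CatalogTable.of(
                        Schema.newBuilder()
                                .column("name", "STRING")
                                .column("age", "INT")
                                .build(),
                        "hive table",             // comment
                        Collections.emptyList(),  // partition keys
                        options);
    }
}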
Use of org.apache.flink.table.catalog.CatalogTable in project flink by apache.
From the class HiveCatalogGenericMetadataTest, the method testGenericTableWithoutConnectorProp:
@Test
public void testGenericTableWithoutConnectorProp() throws Exception {
    catalog.createDatabase(db1, createDb(), false);

    TableSchema tableSchema =
            TableSchema.builder()
                    .fields(
                            new String[] {"s", "ts"},
                            new DataType[] {DataTypes.STRING(), DataTypes.TIMESTAMP_LTZ(3)})
                    .watermark("ts", "ts-INTERVAL '1' SECOND", DataTypes.TIMESTAMP_LTZ(3))
                    .build();
    CatalogTable catalogTable = new CatalogTableImpl(tableSchema, Collections.emptyMap(), null);
    catalog.createTable(path1, catalogTable, false);

    CatalogTable retrievedTable = (CatalogTable) catalog.getTable(path1);
    assertEquals(tableSchema, retrievedTable.getSchema());
    assertEquals(Collections.emptyMap(), retrievedTable.getOptions());
}
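The schema equality assertion implicitly covers the watermark: HiveCatalog serializes the watermark spec into table properties on write and reconstructs it on read. If you wanted to check that round trip explicitly, something like the following helper should hold; a sketch against the deprecated TableSchema/WatermarkSpec API the test itself uses:

import static org.junit.Assert.assertEquals;

import org.apache.flink.table.api.TableSchema;
import org.apache.flink.table.api.WatermarkSpec;

public class WatermarkRoundTripCheck {

    // Could be called with retrievedTable.getSchema() right after getTable(path1).
    static void assertWatermark(TableSchema schema) {
        WatermarkSpec spec = schema.getWatermarkSpecs().get(0);
        assertEquals("ts", spec.getRowtimeAttribute());
        assertEquals("ts-INTERVAL '1' SECOND", spec.getWatermarkExpr());
    }
}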
Use of org.apache.flink.table.catalog.CatalogTable in project flink by apache.
From the class HiveCatalogUdfITCase, the method testUdf:
private void testUdf(boolean batch) throws Exception {
    StreamExecutionEnvironment env = null;
    TableEnvironment tEnv;
    EnvironmentSettings.Builder settingsBuilder = EnvironmentSettings.newInstance();
    if (batch) {
        settingsBuilder.inBatchMode();
    } else {
        settingsBuilder.inStreamingMode();
    }
    if (batch) {
        tEnv = TableEnvironment.create(settingsBuilder.build());
    } else {
        env = StreamExecutionEnvironment.getExecutionEnvironment();
        tEnv = StreamTableEnvironment.create(env, settingsBuilder.build());
    }

    BatchTestBase.configForMiniCluster(tEnv.getConfig());

    tEnv.registerCatalog("myhive", hiveCatalog);
    tEnv.useCatalog("myhive");

    String innerSql =
            format(
                    "select mygenericudf(myudf(name), 1) as a, mygenericudf(myudf(age), 1) as b,"
                            + " s from %s, lateral table(myudtf(name, 1)) as T(s)",
                    sourceTableName);
    String selectSql = format("select a, s, sum(b), myudaf(b) from (%s) group by a, s", innerSql);

    List<String> results;
    if (batch) {
        Path p = Paths.get(tempFolder.newFolder().getAbsolutePath(), "test.csv");

        final TableSchema sinkSchema =
                TableSchema.builder()
                        .field("name1", Types.STRING())
                        .field("name2", Types.STRING())
                        .field("sum1", Types.INT())
                        .field("sum2", Types.LONG())
                        .build();
        final Map<String, String> sinkOptions = new HashMap<>();
        sinkOptions.put("connector.type", "filesystem");
        sinkOptions.put("connector.path", p.toAbsolutePath().toString());
        sinkOptions.put("format.type", "csv");
        final CatalogTable sink = new CatalogTableImpl(sinkSchema, sinkOptions, "Comment.");
        hiveCatalog.createTable(new ObjectPath(HiveCatalog.DEFAULT_DB, sinkTableName), sink, false);

        tEnv.executeSql(format("insert into %s " + selectSql, sinkTableName)).await();

        // assert written result
        StringBuilder builder = new StringBuilder();
        try (Stream<Path> paths = Files.walk(Paths.get(p.toAbsolutePath().toString()))) {
            paths.filter(Files::isRegularFile)
                    .forEach(
                            path -> {
                                try {
                                    String content = FileUtils.readFileUtf8(path.toFile());
                                    if (content.isEmpty()) {
                                        return;
                                    }
                                    builder.append(content);
                                } catch (IOException e) {
                                    throw new RuntimeException(e);
                                }
                            });
        }
        results =
                Arrays.stream(builder.toString().split("\n"))
                        .filter(s -> !s.isEmpty())
                        .collect(Collectors.toList());
    } else {
        StreamTableEnvironment streamTEnv = (StreamTableEnvironment) tEnv;
        TestingRetractSink sink = new TestingRetractSink();
        streamTEnv
                .toRetractStream(tEnv.sqlQuery(selectSql), Row.class)
                .map(new JavaToScala())
                .addSink((SinkFunction) sink);
        env.execute("");
        results = JavaScalaConversionUtil.toJava(sink.getRetractResults());
    }

    results = new ArrayList<>(results);
    results.sort(String::compareTo);
    Assert.assertEquals(Arrays.asList("1,1,2,2", "2,2,4,4", "3,3,6,6"), results);
}
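The batch/streaming branching at the top of testUdf is a common pattern worth isolating: batch mode needs only a TableEnvironment, while streaming mode goes through StreamTableEnvironment so results can be converted to a DataStream, as the retract-sink branch requires. A minimal helper capturing that split, assuming nothing beyond the Table API bridge:

import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.table.api.EnvironmentSettings;
import org.apache.flink.table.api.TableEnvironment;
import org.apache.flink.table.api.bridge.java.StreamTableEnvironment;

public final class TableEnvs {

    private TableEnvs() {}

    // Creates a batch TableEnvironment, or a StreamTableEnvironment bound to a
    // StreamExecutionEnvironment when running in streaming mode.
    public static TableEnvironment create(boolean batch) {
        EnvironmentSettings settings =
                batch
                        ? EnvironmentSettings.newInstance().inBatchMode().build()
                        : EnvironmentSettings.newInstance().inStreamingMode().build();
        if (batch) {
            return TableEnvironment.create(settings);
        }
        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
        return StreamTableEnvironment.create(env, settings);
    }
}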