Use of org.apache.flink.table.api.TableEnvironment in project flink by apache.
The class CatalogITCase, method testCreateCatalogFromUserClassLoader:
@Test
public void testCreateCatalogFromUserClassLoader() throws Exception {
    final String className = "UserCatalogFactory";
    URLClassLoader classLoader =
            ClassLoaderUtils.withRoot(temporaryFolder.newFolder())
                    .addResource(
                            "META-INF/services/org.apache.flink.table.factories.Factory",
                            "UserCatalogFactory")
                    .addClass(
                            className,
                            "import org.apache.flink.configuration.ConfigOption;\n"
                                    + "import org.apache.flink.table.catalog.Catalog;\n"
                                    + "import org.apache.flink.table.catalog.GenericInMemoryCatalog;\n"
                                    + "import org.apache.flink.table.factories.CatalogFactory;\n"
                                    + "\n"
                                    + "import java.util.Collections;\n"
                                    + "import java.util.Set;\n"
                                    + "\n"
                                    + "public class UserCatalogFactory implements CatalogFactory {\n"
                                    + " @Override\n"
                                    + " public Catalog createCatalog(Context context) {\n"
                                    + " return new GenericInMemoryCatalog(context.getName());\n"
                                    + " }\n"
                                    + "\n"
                                    + " @Override\n"
                                    + " public String factoryIdentifier() {\n"
                                    + " return \"userCatalog\";\n"
                                    + " }\n"
                                    + "\n"
                                    + " @Override\n"
                                    + " public Set<ConfigOption<?>> requiredOptions() {\n"
                                    + " return Collections.emptySet();\n"
                                    + " }\n"
                                    + "\n"
                                    + " @Override\n"
                                    + " public Set<ConfigOption<?>> optionalOptions() {\n"
                                    + " return Collections.emptySet();\n"
                                    + " }\n"
                                    + "}")
                    .build();

    try (TemporaryClassLoaderContext context = TemporaryClassLoaderContext.of(classLoader)) {
        TableEnvironment tableEnvironment = getTableEnvironment();
        tableEnvironment.executeSql("CREATE CATALOG cat WITH ('type'='userCatalog')");
        assertTrue(tableEnvironment.getCatalog("cat").isPresent());
    }
}
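The classloader indirection exists only to prove SPI discovery: the DDL's 'type' value is matched against the factory's factoryIdentifier(), found through the META-INF/services entry. When no factory is involved, a catalog instance can be handed to the environment directly; a minimal sketch, assuming a standalone environment rather than the test's getTableEnvironment() helper:

    TableEnvironment env = TableEnvironment.create(EnvironmentSettings.inStreamingMode());
    // Bypasses factory discovery entirely: register the catalog instance by name.
    env.registerCatalog("cat", new GenericInMemoryCatalog("cat"));
    env.getCatalog("cat").isPresent(); // true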
Use of org.apache.flink.table.api.TableEnvironment in project flink by apache.
The class FileSystemTableSourceTest, method setup:
@Before
public void setup() {
    util = streamTestUtil(TableConfig.getDefault());
    TableEnvironment tEnv = util.getTableEnv();

    String srcTableDdl =
            "CREATE TABLE MyTable (\n"
                    + " a bigint,\n"
                    + " b int,\n"
                    + " c varchar\n"
                    + ") with (\n"
                    + " 'connector' = 'filesystem',"
                    + " 'format' = 'testcsv',"
                    + " 'path' = '/tmp')";
    tEnv.executeSql(srcTableDdl);

    String srcTableWithMetaDdl =
            "CREATE TABLE MyTableWithMeta (\n"
                    + " a bigint,\n"
                    + " b int,\n"
                    + " c varchar,\n"
                    + " filemeta STRING METADATA FROM 'file.path'\n"
                    + ") with (\n"
                    + " 'connector' = 'filesystem',"
                    + " 'format' = 'testcsv',"
                    + " 'path' = '/tmp')";
    tEnv.executeSql(srcTableWithMetaDdl);

    String sinkTableDdl =
            "CREATE TABLE MySink (\n"
                    + " a bigint,\n"
                    + " b int,\n"
                    + " c varchar\n"
                    + ") with (\n"
                    + " 'connector' = 'values',\n"
                    + " 'table-sink-class' = 'DEFAULT')";
    tEnv.executeSql(sinkTableDdl);
}
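With these tables registered, a test can for instance check how the declared metadata column is read by the source. A minimal sketch reusing tEnv from the setup above (the real test class asserts against serialized plans instead):

    // 'filemeta' is the 'file.path' metadata column declared on MyTableWithMeta.
    String plan = tEnv.explainSql("SELECT a, b, filemeta FROM MyTableWithMeta");
    System.out.println(plan);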
Use of org.apache.flink.table.api.TableEnvironment in project flink by apache.
The class FileSystemTableSinkTest, method testFileSystemTableSinkWithParallelismInStreaming:
@Test
public void testFileSystemTableSinkWithParallelismInStreaming() {
    final int parallelism = 5;
    final TableEnvironment tEnv = TableEnvironment.create(EnvironmentSettings.inStreamingMode());
    tEnv.getConfig().set(ExecutionConfigOptions.TABLE_EXEC_RESOURCE_DEFAULT_PARALLELISM, 8);
    final String testSourceTableName = "test_source_table";
    tEnv.executeSql(buildSourceTableSql(testSourceTableName, false));

    // verify operator parallelisms when compaction is not enabled
    final String testSinkTableName = "test_sink_table";
    tEnv.executeSql(buildSinkTableSql(testSinkTableName, parallelism, false));
    final String sql0 = buildInsertIntoSql(testSinkTableName, testSourceTableName);
    final String actualNormal = tEnv.explainSql(sql0, ExplainDetail.JSON_EXECUTION_PLAN);
    final String expectedNormal =
            readFromResource(
                    "/explain/filesystem/testFileSystemTableSinkWithParallelismInStreamingSql0.out");
    assertEquals(
            replaceNodeIdInOperator(replaceStreamNodeId(replaceStageId(expectedNormal))),
            replaceNodeIdInOperator(replaceStreamNodeId(replaceStageId(actualNormal))));

    // verify operator parallelisms when compaction is enabled
    final String testCompactSinkTableName = "test_compact_sink_table";
    tEnv.executeSql(buildSinkTableSql(testCompactSinkTableName, parallelism, true));
    final String sql1 = buildInsertIntoSql(testCompactSinkTableName, testSourceTableName);
    final String actualCompact = tEnv.explainSql(sql1, ExplainDetail.JSON_EXECUTION_PLAN);
    final String expectedCompact =
            readFromResource(
                    "/explain/filesystem/testFileSystemTableSinkWithParallelismInStreamingSql1.out");
    assertEquals(
            replaceNodeIdInOperator(replaceStreamNodeId(replaceStageId(expectedCompact))),
            replaceNodeIdInOperator(replaceStreamNodeId(replaceStageId(actualCompact))));
}
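The buildSourceTableSql/buildSinkTableSql/buildInsertIntoSql helpers are test-local and not shown here. As a rough, hypothetical reconstruction of the sink helper's shape: 'sink.parallelism' carries the parallelism (that option name is confirmed by the error message in the next snippet), and the boolean flag plausibly toggles the filesystem connector's 'auto-compaction' option; the schema and remaining options below are assumptions:

    // Hypothetical sketch of the helper; the real one lives in FileSystemTableSinkTest.
    private static String buildSinkTableSql(String tableName, int parallelism, boolean compaction) {
        return String.format(
                "CREATE TABLE %s (a BIGINT) WITH ("
                        + " 'connector' = 'filesystem',"
                        + " 'format' = 'testcsv',"
                        + " 'path' = '/tmp',"
                        + " 'sink.parallelism' = '%d',"
                        + " 'auto-compaction' = '%s')",
                tableName, parallelism, compaction);
    }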
Use of org.apache.flink.table.api.TableEnvironment in project flink by apache.
The class FileSystemTableSinkTest, method testExceptionWhenSettingParallelismWithUpdatingQuery:
@Test
public void testExceptionWhenSettingParallelismWithUpdatingQuery() {
    final TableEnvironment tEnv = TableEnvironment.create(EnvironmentSettings.inStreamingMode());
    final String testSourceTableName = "test_source_table";
    tEnv.executeSql(buildSourceTableSql(testSourceTableName, false));
    final String testSinkTableName = "test_sink_table";
    tEnv.executeSql(buildSinkTableSql(testSinkTableName, 10, false));

    String sql =
            String.format(
                    "INSERT INTO %s SELECT DISTINCT * FROM %s",
                    testSinkTableName, testSourceTableName);
    assertThrows(
            "filesystem sink doesn't support setting parallelism (10) by 'sink.parallelism' when the input stream is not INSERT only.",
            ValidationException.class,
            () -> tEnv.explainSql(sql));
}
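SELECT DISTINCT forces an updating (retracting) stream, and the filesystem sink only honors 'sink.parallelism' when its input is INSERT-only. Dropping DISTINCT makes the same pipeline explain without error; a minimal sketch:

    String insertOnlySql =
            String.format("INSERT INTO %s SELECT * FROM %s", testSinkTableName, testSourceTableName);
    tEnv.explainSql(insertOnlySql); // passes: the input stream is INSERT-only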
Use of org.apache.flink.table.api.TableEnvironment in project zeppelin by apache.
The class Flink113Shims, method startMultipleInsert:
@Override
public void startMultipleInsert(Object tblEnv, InterpreterContext context) throws Exception {
    StatementSet statementSet = ((TableEnvironment) tblEnv).createStatementSet();
    statementSetMap.put(context.getParagraphId(), statementSet);
}
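The shim keeps one StatementSet per paragraph so that several INSERT statements can be buffered and later submitted as a single Flink job. A minimal sketch of the consuming side, assuming the Flink 1.13 StatementSet API (the Zeppelin methods that actually do this are not shown here):

    StatementSet statementSet = statementSetMap.get(context.getParagraphId());
    statementSet.addInsertSql("INSERT INTO sink1 SELECT * FROM src");
    statementSet.addInsertSql("INSERT INTO sink2 SELECT * FROM src");
    statementSet.execute(); // one job covering all buffered inserts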