Example 16 with TableEnvironment

Use of org.apache.flink.table.api.TableEnvironment in project flink by apache.

From the class HiveTableSinkITCase, method testHiveTableSinkWithParallelismInStreaming.

@Test
public void testHiveTableSinkWithParallelismInStreaming() {
    final StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
    final TableEnvironment tEnv = HiveTestUtils.createTableEnvInStreamingMode(env, SqlDialect.HIVE);
    testHiveTableSinkWithParallelismBase(tEnv, "/explain/testHiveTableSinkWithParallelismInStreaming.out");
}
Also used : StreamExecutionEnvironment(org.apache.flink.streaming.api.environment.StreamExecutionEnvironment) StreamTableEnvironment(org.apache.flink.table.api.bridge.java.StreamTableEnvironment) TableEnvironment(org.apache.flink.table.api.TableEnvironment) Test(org.junit.Test)
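
HiveTestUtils is an internal Flink test helper, so createTableEnvInStreamingMode is not public API. Below is a minimal sketch of the equivalent streaming-mode setup using only public APIs; the method name createStreamingHiveTableEnv is ours, not Flink's:

import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.table.api.SqlDialect;
import org.apache.flink.table.api.bridge.java.StreamTableEnvironment;

public class StreamingHiveEnvSketch {
    // Hypothetical stand-in for HiveTestUtils.createTableEnvInStreamingMode(env, SqlDialect.HIVE).
    static StreamTableEnvironment createStreamingHiveTableEnv(StreamExecutionEnvironment env) {
        StreamTableEnvironment tEnv = StreamTableEnvironment.create(env);
        // Accept Hive DDL/DML syntax in subsequent executeSql calls.
        tEnv.getConfig().setSqlDialect(SqlDialect.HIVE);
        return tEnv;
    }
}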

Example 17 with TableEnvironment

Use of org.apache.flink.table.api.TableEnvironment in project flink by apache.

From the class HiveTableSinkITCase, method testHiveTableSinkWithParallelismInBatch.

@Test
public void testHiveTableSinkWithParallelismInBatch() {
    final TableEnvironment tEnv = HiveTestUtils.createTableEnvInBatchMode(SqlDialect.HIVE);
    testHiveTableSinkWithParallelismBase(tEnv, "/explain/testHiveTableSinkWithParallelismInBatch.out");
}
Also used : StreamTableEnvironment(org.apache.flink.table.api.bridge.java.StreamTableEnvironment) TableEnvironment(org.apache.flink.table.api.TableEnvironment) Test(org.junit.Test)
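
The batch-mode counterpart of the previous sketch, again assuming the helper-style name is ours. Unlike the streaming case, a batch TableEnvironment needs no StreamExecutionEnvironment; it is created from EnvironmentSettings directly:

import org.apache.flink.table.api.EnvironmentSettings;
import org.apache.flink.table.api.SqlDialect;
import org.apache.flink.table.api.TableEnvironment;

public class BatchHiveEnvSketch {
    // Hypothetical stand-in for HiveTestUtils.createTableEnvInBatchMode(SqlDialect.HIVE).
    static TableEnvironment createBatchHiveTableEnv() {
        TableEnvironment tEnv =
                TableEnvironment.create(EnvironmentSettings.newInstance().inBatchMode().build());
        tEnv.getConfig().setSqlDialect(SqlDialect.HIVE);
        return tEnv;
    }
}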

Example 18 with TableEnvironment

Use of org.apache.flink.table.api.TableEnvironment in project flink by apache.

From the class HiveTableSinkITCase, method testBatchAppend.

@Test
public void testBatchAppend() throws Exception {
    TableEnvironment tEnv = HiveTestUtils.createTableEnvInBatchMode(SqlDialect.HIVE);
    tEnv.registerCatalog(hiveCatalog.getName(), hiveCatalog);
    tEnv.useCatalog(hiveCatalog.getName());
    tEnv.executeSql("create database db1");
    tEnv.useDatabase("db1");
    try {
        tEnv.executeSql("create table append_table (i int, j int)");
        // Each INSERT runs as a separate batch job; await() blocks until it finishes.
        tEnv.executeSql("insert into append_table select 1, 1").await();
        tEnv.executeSql("insert into append_table select 2, 2").await();
        List<Row> rows = CollectionUtil.iteratorToList(tEnv.executeSql("select * from append_table").collect());
        // Sort by the first column so the comparison is deterministic.
        rows.sort(Comparator.comparingInt(o -> (int) o.getField(0)));
        Assert.assertEquals(Arrays.asList(Row.of(1, 1), Row.of(2, 2)), rows);
    } finally {
        // Clean up the database even if the assertions fail.
        tEnv.executeSql("drop database db1 cascade");
    }
}
Also used : Arrays(java.util.Arrays) Comparator(java.util.Comparator) List(java.util.List) Row(org.apache.flink.types.Row) SqlDialect(org.apache.flink.table.api.SqlDialect) TableEnvironment(org.apache.flink.table.api.TableEnvironment) HiveTestUtils(org.apache.flink.table.catalog.hive.HiveTestUtils) CollectionUtil(org.apache.flink.util.CollectionUtil) Assert(org.junit.Assert) Test(org.junit.Test)
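
In this test, hiveCatalog is a HiveCatalog field initialized by the test harness. For context outside the test suite, here is a minimal sketch of creating and registering a HiveCatalog with public APIs; the catalog name "myhive" and the conf directory path are placeholders:

import org.apache.flink.table.api.EnvironmentSettings;
import org.apache.flink.table.api.TableEnvironment;
import org.apache.flink.table.catalog.hive.HiveCatalog;

public class HiveCatalogSetupSketch {
    public static void main(String[] args) {
        // "/etc/hive/conf" is a placeholder; point it at a directory containing hive-site.xml.
        HiveCatalog catalog = new HiveCatalog("myhive", "default", "/etc/hive/conf");

        TableEnvironment tEnv =
                TableEnvironment.create(EnvironmentSettings.newInstance().inBatchMode().build());
        // Make the Hive catalog the current catalog, so unqualified table names resolve into it.
        tEnv.registerCatalog(catalog.getName(), catalog);
        tEnv.useCatalog(catalog.getName());
    }
}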

Example 19 with TableEnvironment

Use of org.apache.flink.table.api.TableEnvironment in project flink by apache.

From the class TableEnvHiveConnectorITCase, method testGetNonExistingFunction.

@Test
public void testGetNonExistingFunction() throws Exception {
    TableEnvironment tableEnv = getTableEnvWithHiveCatalog();
    tableEnv.executeSql("create database db1");
    tableEnv.executeSql("create table db1.src (d double, s string)");
    tableEnv.executeSql("create table db1.dest (x bigint)");
    // just make sure the query runs through, no need to verify result
    tableEnv.executeSql("insert into db1.dest select count(d) from db1.src").await();
    tableEnv.executeSql("drop database db1 cascade");
}
Also used : TableEnvironment(org.apache.flink.table.api.TableEnvironment) Test(org.junit.Test)
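
getTableEnvWithHiveCatalog is a private helper of the test class whose body is not shown here. A plausible reconstruction, assuming it mirrors the catalog setup seen in the earlier examples (the real helper lives in TableEnvHiveConnectorITCase and may differ):

// Hypothetical reconstruction; 'hiveCatalog' is the HiveCatalog the test class sets up in @BeforeClass.
private TableEnvironment getTableEnvWithHiveCatalog() {
    TableEnvironment tableEnv = HiveTestUtils.createTableEnvInBatchMode(SqlDialect.HIVE);
    tableEnv.registerCatalog(hiveCatalog.getName(), hiveCatalog);
    tableEnv.useCatalog(hiveCatalog.getName());
    return tableEnv;
}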

Example 20 with TableEnvironment

Use of org.apache.flink.table.api.TableEnvironment in project flink by apache.

From the class TableEnvHiveConnectorITCase, method testDateTimestampPartitionColumns.

@Test
public void testDateTimestampPartitionColumns() throws Exception {
    TableEnvironment tableEnv = getTableEnvWithHiveCatalog();
    tableEnv.executeSql("create database db1");
    try {
        tableEnv.executeSql("create table db1.part(x int) partitioned by (dt date,ts timestamp)");
        // Write text files directly into two partitions through the test-only inserter.
        HiveTestUtils.createTextTableInserter(hiveCatalog, "db1", "part").addRow(new Object[] { 1 }).addRow(new Object[] { 2 }).commit("dt='2019-12-23',ts='2019-12-23 00:00:00'");
        HiveTestUtils.createTextTableInserter(hiveCatalog, "db1", "part").addRow(new Object[] { 3 }).commit("dt='2019-12-25',ts='2019-12-25 16:23:43.012'");
        List<Row> results = CollectionUtil.iteratorToList(tableEnv.sqlQuery("select * from db1.part order by x").execute().collect());
        assertEquals("[+I[1, 2019-12-23, 2019-12-23T00:00], +I[2, 2019-12-23, 2019-12-23T00:00], +I[3, 2019-12-25, 2019-12-25T16:23:43.012]]", results.toString());
        // Filter on the DATE partition column; only the second partition matches.
        results = CollectionUtil.iteratorToList(tableEnv.sqlQuery("select x from db1.part where dt=cast('2019-12-25' as date)").execute().collect());
        assertEquals("[+I[3]]", results.toString());
        // DATE/TIMESTAMP partition values can also be written via INSERT with explicit casts.
        tableEnv.executeSql("insert into db1.part select 4,cast('2019-12-31' as date),cast('2019-12-31 12:00:00.0' as timestamp)").await();
        results = CollectionUtil.iteratorToList(tableEnv.sqlQuery("select max(dt) from db1.part").execute().collect());
        assertEquals("[+I[2019-12-31]]", results.toString());
    } finally {
        tableEnv.executeSql("drop database db1 cascade");
    }
}
Also used : TableEnvironment(org.apache.flink.table.api.TableEnvironment) Row(org.apache.flink.types.Row) Test(org.junit.Test)
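
The expected strings such as "+I[3]" come from Row.toString(), which prefixes every row with its change kind (RowKind): +I insert, -U/+U update, -D delete. A small standalone illustration (the class name RowKindDemo is ours):

import org.apache.flink.types.Row;
import org.apache.flink.types.RowKind;

public class RowKindDemo {
    public static void main(String[] args) {
        // Row.toString() prefixes the change kind; batch results are always inserts.
        Row row = Row.ofKind(RowKind.INSERT, 3);
        System.out.println(row); // prints "+I[3]"
    }
}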

Aggregations

TableEnvironment (org.apache.flink.table.api.TableEnvironment): 137
Test (org.junit.Test): 95
Row (org.apache.flink.types.Row): 58
StreamTableEnvironment (org.apache.flink.table.api.bridge.java.StreamTableEnvironment): 38
Table (org.apache.flink.table.api.Table): 27
ObjectPath (org.apache.flink.table.catalog.ObjectPath): 19
StreamExecutionEnvironment (org.apache.flink.streaming.api.environment.StreamExecutionEnvironment): 14
ArrayList (java.util.ArrayList): 13
CatalogTable (org.apache.flink.table.catalog.CatalogTable): 12
HashMap (java.util.HashMap): 11
EnvironmentSettings (org.apache.flink.table.api.EnvironmentSettings): 10
CatalogBaseTable (org.apache.flink.table.catalog.CatalogBaseTable): 10
TableResult (org.apache.flink.table.api.TableResult): 8
File (java.io.File): 7
Constructor (java.lang.reflect.Constructor): 7
TableImpl (org.apache.flink.table.api.internal.TableImpl): 7
TableException (org.apache.flink.table.api.TableException): 5
List (java.util.List): 4
Configuration (org.apache.flink.configuration.Configuration): 4
TableSchema (org.apache.flink.table.api.TableSchema): 4