Search in sources:

Example 66 with ResolvedSchema

Use of org.apache.flink.table.catalog.ResolvedSchema in the Apache Flink project.

From the class HBaseDynamicTableFactoryTest, method testLookupAsync.

@Test
public void testLookupAsync() {
    // Enable asynchronous lookup through the connector option.
    Map<String, String> tableOptions = getAllOptions();
    tableOptions.put("lookup.async", "true");
    // Schema: a string rowkey plus one column family with two qualifiers.
    ResolvedSchema tableSchema =
            ResolvedSchema.of(
                    Column.physical(ROWKEY, STRING()),
                    Column.physical(FAMILY1, ROW(FIELD(COL1, DOUBLE()), FIELD(COL2, INT()))));
    DynamicTableSource createdSource = createTableSource(tableSchema, tableOptions);
    assertTrue(createdSource instanceof HBaseDynamicTableSource);
    // Request a lookup runtime provider keyed on the rowkey column (index 0).
    int[][] rowkeyLookupKeys = new int[][] {new int[] {0}};
    LookupTableSource.LookupRuntimeProvider provider =
            ((HBaseDynamicTableSource) createdSource)
                    .getLookupRuntimeProvider(new LookupRuntimeProviderContext(rowkeyLookupKeys));
    // With lookup.async=true the factory must produce the async lookup function.
    assertTrue(provider instanceof AsyncTableFunctionProvider);
    AsyncTableFunction lookupFunction =
            ((AsyncTableFunctionProvider) provider).createAsyncTableFunction();
    assertTrue(lookupFunction instanceof HBaseRowDataAsyncLookupFunction);
    assertEquals(
            "testHBastTable",
            ((HBaseRowDataAsyncLookupFunction) lookupFunction).getHTableName());
}
Also used : HBaseDynamicTableSource(org.apache.flink.connector.hbase2.source.HBaseDynamicTableSource) HBaseRowDataAsyncLookupFunction(org.apache.flink.connector.hbase2.source.HBaseRowDataAsyncLookupFunction) LookupRuntimeProviderContext(org.apache.flink.table.runtime.connector.source.LookupRuntimeProviderContext) AsyncTableFunctionProvider(org.apache.flink.table.connector.source.AsyncTableFunctionProvider) LookupTableSource(org.apache.flink.table.connector.source.LookupTableSource) ResolvedSchema(org.apache.flink.table.catalog.ResolvedSchema) AsyncTableFunction(org.apache.flink.table.functions.AsyncTableFunction) DynamicTableSource(org.apache.flink.table.connector.source.DynamicTableSource) HBaseDynamicTableSource(org.apache.flink.connector.hbase2.source.HBaseDynamicTableSource) Test(org.junit.Test)

Example 67 with ResolvedSchema

Use of org.apache.flink.table.catalog.ResolvedSchema in the Apache Flink project.

From the class HBaseDynamicTableFactoryTest, method testTableSinkFactory.

@Test
public void testTableSinkFactory() {
    // Schema: string rowkey plus four column families covering a spread of data types.
    ResolvedSchema tableSchema =
            ResolvedSchema.of(
                    Column.physical(ROWKEY, STRING()),
                    Column.physical(FAMILY1, ROW(FIELD(COL1, DOUBLE()), FIELD(COL2, INT()))),
                    Column.physical(FAMILY2, ROW(FIELD(COL1, INT()), FIELD(COL3, BIGINT()))),
                    Column.physical(FAMILY3, ROW(FIELD(COL2, BOOLEAN()), FIELD(COL3, STRING()))),
                    Column.physical(
                            FAMILY4,
                            ROW(
                                    FIELD(COL1, DECIMAL(10, 3)),
                                    FIELD(COL2, TIMESTAMP(3)),
                                    FIELD(COL3, DATE()),
                                    FIELD(COL4, TIME()))));
    DynamicTableSink createdSink = createTableSink(tableSchema, getAllOptions());
    assertTrue(createdSink instanceof HBaseDynamicTableSink);
    HBaseDynamicTableSink sink = (HBaseDynamicTableSink) createdSink;

    // The derived HBase schema must mirror the Flink schema: rowkey first, then families
    // with their qualifier names and data types in declaration order.
    HBaseTableSchema derivedSchema = sink.getHBaseTableSchema();
    assertEquals(0, derivedSchema.getRowKeyIndex());
    assertEquals(Optional.of(STRING()), derivedSchema.getRowKeyDataType());
    assertArrayEquals(new String[] {"f1", "f2", "f3", "f4"}, derivedSchema.getFamilyNames());
    assertArrayEquals(new String[] {"c1", "c2"}, derivedSchema.getQualifierNames("f1"));
    assertArrayEquals(new String[] {"c1", "c3"}, derivedSchema.getQualifierNames("f2"));
    assertArrayEquals(new String[] {"c2", "c3"}, derivedSchema.getQualifierNames("f3"));
    assertArrayEquals(
            new String[] {"c1", "c2", "c3", "c4"}, derivedSchema.getQualifierNames("f4"));
    assertArrayEquals(
            new DataType[] {DOUBLE(), INT()}, derivedSchema.getQualifierDataTypes("f1"));
    assertArrayEquals(
            new DataType[] {INT(), BIGINT()}, derivedSchema.getQualifierDataTypes("f2"));
    assertArrayEquals(
            new DataType[] {BOOLEAN(), STRING()}, derivedSchema.getQualifierDataTypes("f3"));
    assertArrayEquals(
            new DataType[] {DECIMAL(10, 3), TIMESTAMP(3), DATE(), TIME()},
            derivedSchema.getQualifierDataTypes("f4"));

    // Verify the Hadoop configuration carries the connector options through.
    org.apache.hadoop.conf.Configuration expectedConfiguration =
            HBaseConfigurationUtil.getHBaseConfiguration();
    expectedConfiguration.set(HConstants.ZOOKEEPER_QUORUM, "localhost:2181");
    expectedConfiguration.set(HConstants.ZOOKEEPER_ZNODE_PARENT, "/flink");
    expectedConfiguration.set("hbase.security.authentication", "kerberos");
    org.apache.hadoop.conf.Configuration actualConfiguration = sink.getConfiguration();
    assertEquals(
            IteratorUtils.toList(expectedConfiguration.iterator()),
            IteratorUtils.toList(actualConfiguration.iterator()));

    // Verify the table name and the default buffer-flush write options.
    assertEquals("testHBastTable", sink.getTableName());
    HBaseWriteOptions expectedWriteOptions =
            HBaseWriteOptions.builder()
                    .setBufferFlushMaxRows(1000)
                    .setBufferFlushIntervalMillis(1000)
                    .setBufferFlushMaxSizeInBytes(2 * 1024 * 1024)
                    .build();
    assertEquals(expectedWriteOptions, sink.getWriteOptions());
}
Also used : HBaseDynamicTableSink(org.apache.flink.connector.hbase2.sink.HBaseDynamicTableSink) HBaseTableSchema(org.apache.flink.connector.hbase.util.HBaseTableSchema) HBaseDynamicTableSink(org.apache.flink.connector.hbase2.sink.HBaseDynamicTableSink) DynamicTableSink(org.apache.flink.table.connector.sink.DynamicTableSink) ResolvedSchema(org.apache.flink.table.catalog.ResolvedSchema) HBaseWriteOptions(org.apache.flink.connector.hbase.options.HBaseWriteOptions) Test(org.junit.Test)

Example 68 with ResolvedSchema

Use of org.apache.flink.table.catalog.ResolvedSchema in the Apache Flink project.

From the class HBaseDynamicTableFactoryTest, method testBufferFlushOptions.

@Test
public void testBufferFlushOptions() {
    // Override the three buffer-flush options and check they reach HBaseWriteOptions.
    Map<String, String> tableOptions = getAllOptions();
    tableOptions.put("sink.buffer-flush.max-size", "10mb");
    tableOptions.put("sink.buffer-flush.max-rows", "100");
    tableOptions.put("sink.buffer-flush.interval", "10s");
    DynamicTableSink sink =
            createTableSink(ResolvedSchema.of(Column.physical(ROWKEY, STRING())), tableOptions);
    // "10mb" must be parsed into bytes, "10s" into millis; max-rows stays as given.
    HBaseWriteOptions expectedOptions =
            HBaseWriteOptions.builder()
                    .setBufferFlushMaxRows(100)
                    .setBufferFlushIntervalMillis(10 * 1000)
                    .setBufferFlushMaxSizeInBytes(10 * 1024 * 1024)
                    .build();
    assertEquals(expectedOptions, ((HBaseDynamicTableSink) sink).getWriteOptions());
}
Also used : HBaseDynamicTableSink(org.apache.flink.connector.hbase2.sink.HBaseDynamicTableSink) HBaseDynamicTableSink(org.apache.flink.connector.hbase2.sink.HBaseDynamicTableSink) DynamicTableSink(org.apache.flink.table.connector.sink.DynamicTableSink) ResolvedSchema(org.apache.flink.table.catalog.ResolvedSchema) HBaseWriteOptions(org.apache.flink.connector.hbase.options.HBaseWriteOptions) Test(org.junit.Test)

Example 69 with ResolvedSchema

Use of org.apache.flink.table.catalog.ResolvedSchema in the Apache Flink project.

From the class HBaseDynamicTableFactoryTest, method testTypeWithUnsupportedPrecision.

@Test
public void testTypeWithUnsupportedPrecision() {
    Map<String, String> options = getAllOptions();
    // TIMESTAMP(6): the HBase connector only supports precision in [0, 3].
    ResolvedSchema schema =
            ResolvedSchema.of(
                    Column.physical(ROWKEY, STRING()),
                    Column.physical(
                            FAMILY1, ROW(FIELD(COL1, TIMESTAMP(6)), FIELD(COL2, INT()))));
    assertUnsupportedPrecision(
            schema,
            options,
            "The precision 6 of TIMESTAMP type is out of the range [0, 3]"
                    + " supported by HBase connector");

    // TIME(6): the same precision restriction applies to TIME.
    schema =
            ResolvedSchema.of(
                    Column.physical(ROWKEY, STRING()),
                    Column.physical(FAMILY1, ROW(FIELD(COL1, TIME(6)), FIELD(COL2, INT()))));
    assertUnsupportedPrecision(
            schema,
            options,
            "The precision 6 of TIME type is out of the range [0, 3]"
                    + " supported by HBase connector");
}

/**
 * Asserts that both table source and table sink creation fail for the given schema, and that
 * the failure's cause chain contains the expected precision error message.
 *
 * @param schema the resolved schema containing an unsupported precision
 * @param options the connector options to create the table with
 * @param expectedMessage the message expected somewhere in the thrown exception chain
 */
private void assertUnsupportedPrecision(
        ResolvedSchema schema, Map<String, String> options, String expectedMessage) {
    try {
        createTableSource(schema, options);
        fail("Should fail");
    } catch (Exception e) {
        assertTrue(ExceptionUtils.findThrowableWithMessage(e, expectedMessage).isPresent());
    }
    try {
        createTableSink(schema, options);
        fail("Should fail");
    } catch (Exception e) {
        assertTrue(ExceptionUtils.findThrowableWithMessage(e, expectedMessage).isPresent());
    }
}
Also used : ResolvedSchema(org.apache.flink.table.catalog.ResolvedSchema) ExpectedException(org.junit.rules.ExpectedException) Test(org.junit.Test)

Example 70 with ResolvedSchema

Use of org.apache.flink.table.catalog.ResolvedSchema in the Apache Flink project.

From the class HBaseDynamicTableFactoryTest (HBase 1.x connector), method testTableSinkFactory.

// Verifies the sink factory for the HBase 1.x connector: the created sink must expose the
// derived HBase table schema, the Hadoop configuration built from the connector options,
// the table name, and the default buffer-flush write options.
// NOTE(review): this duplicates the hbase2 variant of the same test; the two differ only in
// the imported HBaseDynamicTableSink package (hbase1 vs hbase2).
@Test
public void testTableSinkFactory() {
    // Schema: string rowkey plus four column families covering a spread of data types.
    ResolvedSchema schema = ResolvedSchema.of(Column.physical(ROWKEY, STRING()), Column.physical(FAMILY1, ROW(FIELD(COL1, DOUBLE()), FIELD(COL2, INT()))), Column.physical(FAMILY2, ROW(FIELD(COL1, INT()), FIELD(COL3, BIGINT()))), Column.physical(FAMILY3, ROW(FIELD(COL2, BOOLEAN()), FIELD(COL3, STRING()))), Column.physical(FAMILY4, ROW(FIELD(COL1, DECIMAL(10, 3)), FIELD(COL2, TIMESTAMP(3)), FIELD(COL3, DATE()), FIELD(COL4, TIME()))));
    DynamicTableSink sink = createTableSink(schema, getAllOptions());
    assertTrue(sink instanceof HBaseDynamicTableSink);
    HBaseDynamicTableSink hbaseSink = (HBaseDynamicTableSink) sink;
    // The derived HBase schema must mirror the Flink schema: rowkey first, then families
    // with their qualifier names and data types in declaration order.
    HBaseTableSchema hbaseSchema = hbaseSink.getHBaseTableSchema();
    assertEquals(0, hbaseSchema.getRowKeyIndex());
    assertEquals(Optional.of(STRING()), hbaseSchema.getRowKeyDataType());
    assertArrayEquals(new String[] { "f1", "f2", "f3", "f4" }, hbaseSchema.getFamilyNames());
    assertArrayEquals(new String[] { "c1", "c2" }, hbaseSchema.getQualifierNames("f1"));
    assertArrayEquals(new String[] { "c1", "c3" }, hbaseSchema.getQualifierNames("f2"));
    assertArrayEquals(new String[] { "c2", "c3" }, hbaseSchema.getQualifierNames("f3"));
    assertArrayEquals(new String[] { "c1", "c2", "c3", "c4" }, hbaseSchema.getQualifierNames("f4"));
    assertArrayEquals(new DataType[] { DOUBLE(), INT() }, hbaseSchema.getQualifierDataTypes("f1"));
    assertArrayEquals(new DataType[] { INT(), BIGINT() }, hbaseSchema.getQualifierDataTypes("f2"));
    assertArrayEquals(new DataType[] { BOOLEAN(), STRING() }, hbaseSchema.getQualifierDataTypes("f3"));
    assertArrayEquals(new DataType[] { DECIMAL(10, 3), TIMESTAMP(3), DATE(), TIME() }, hbaseSchema.getQualifierDataTypes("f4"));
    // verify hadoop Configuration: the connector options must be applied on top of the
    // default HBase configuration.
    org.apache.hadoop.conf.Configuration expectedConfiguration = HBaseConfigurationUtil.getHBaseConfiguration();
    expectedConfiguration.set(HConstants.ZOOKEEPER_QUORUM, "localhost:2181");
    expectedConfiguration.set(HConstants.ZOOKEEPER_ZNODE_PARENT, "/flink");
    expectedConfiguration.set("hbase.security.authentication", "kerberos");
    org.apache.hadoop.conf.Configuration actualConfiguration = hbaseSink.getConfiguration();
    // Compare entry-by-entry; Configuration itself has no usable equals().
    assertEquals(IteratorUtils.toList(expectedConfiguration.iterator()), IteratorUtils.toList(actualConfiguration.iterator()));
    // verify tableName
    assertEquals("testHBastTable", hbaseSink.getTableName());
    // Default write options: 1000 rows, 1s interval, 2 MiB buffer.
    HBaseWriteOptions expectedWriteOptions = HBaseWriteOptions.builder().setBufferFlushMaxRows(1000).setBufferFlushIntervalMillis(1000).setBufferFlushMaxSizeInBytes(2 * 1024 * 1024).build();
    HBaseWriteOptions actualWriteOptions = hbaseSink.getWriteOptions();
    assertEquals(expectedWriteOptions, actualWriteOptions);
}
Also used : HBaseDynamicTableSink(org.apache.flink.connector.hbase1.sink.HBaseDynamicTableSink) HBaseTableSchema(org.apache.flink.connector.hbase.util.HBaseTableSchema) HBaseDynamicTableSink(org.apache.flink.connector.hbase1.sink.HBaseDynamicTableSink) DynamicTableSink(org.apache.flink.table.connector.sink.DynamicTableSink) ResolvedSchema(org.apache.flink.table.catalog.ResolvedSchema) HBaseWriteOptions(org.apache.flink.connector.hbase.options.HBaseWriteOptions) Test(org.junit.Test)

Aggregations

ResolvedSchema (org.apache.flink.table.catalog.ResolvedSchema)84 Test (org.junit.Test)50 DynamicTableSink (org.apache.flink.table.connector.sink.DynamicTableSink)20 DataType (org.apache.flink.table.types.DataType)20 RowData (org.apache.flink.table.data.RowData)17 ValidationException (org.apache.flink.table.api.ValidationException)14 ResolvedCatalogTable (org.apache.flink.table.catalog.ResolvedCatalogTable)14 List (java.util.List)11 SinkRuntimeProviderContext (org.apache.flink.table.runtime.connector.sink.SinkRuntimeProviderContext)11 DynamicTableSource (org.apache.flink.table.connector.source.DynamicTableSource)10 Column (org.apache.flink.table.catalog.Column)9 LogicalType (org.apache.flink.table.types.logical.LogicalType)9 RowType (org.apache.flink.table.types.logical.RowType)9 HashMap (java.util.HashMap)8 Collectors (java.util.stream.Collectors)8 RelDataType (org.apache.calcite.rel.type.RelDataType)8 Internal (org.apache.flink.annotation.Internal)8 HBaseWriteOptions (org.apache.flink.connector.hbase.options.HBaseWriteOptions)6 FlinkTypeFactory (org.apache.flink.table.planner.calcite.FlinkTypeFactory)6 Row (org.apache.flink.types.Row)6