Use of org.apache.flink.connector.hbase1.sink.HBaseDynamicTableSink in project flink by apache.
From the class HBase1DynamicTableFactory, method createDynamicTableSink.
@Override
public DynamicTableSink createDynamicTableSink(Context context) {
    TableFactoryHelper helper = createTableFactoryHelper(this, context);
    helper.validateExcept(PROPERTIES_PREFIX);
    final ReadableConfig tableOptions = helper.getOptions();
    validatePrimaryKey(context.getPhysicalRowDataType(), context.getPrimaryKeyIndexes());
    String tableName = tableOptions.get(TABLE_NAME);
    Configuration hbaseConf = getHBaseConfiguration(tableOptions);
    HBaseWriteOptions hBaseWriteOptions = getHBaseWriteOptions(tableOptions);
    String nullStringLiteral = tableOptions.get(NULL_STRING_LITERAL);
    HBaseTableSchema hbaseSchema =
            HBaseTableSchema.fromDataType(context.getPhysicalRowDataType());
    return new HBaseDynamicTableSink(
            tableName, hbaseSchema, hbaseConf, hBaseWriteOptions, nullStringLiteral);
}
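For context, the options this method reads come from a table's WITH clause. Below is a minimal sketch of a DDL that would be routed through this factory ('hbase-1.4' is the HBase 1.4 connector identifier; the table name, column layout, and ZooKeeper address are illustrative assumptions, not taken from this page):

public class HBaseSinkDdlSketch {
    public static void main(String[] args) {
        // Sketch only: the WITH options below become the tableOptions consumed
        // by createDynamicTableSink. Names and addresses are placeholders.
        org.apache.flink.table.api.TableEnvironment tEnv =
                org.apache.flink.table.api.TableEnvironment.create(
                        org.apache.flink.table.api.EnvironmentSettings.newInstance()
                                .inStreamingMode()
                                .build());
        tEnv.executeSql(
                "CREATE TABLE hbase_sink ("
                        + "  rowkey STRING,"
                        + "  f1 ROW<c1 DOUBLE, c2 INT>,"
                        + "  PRIMARY KEY (rowkey) NOT ENFORCED"
                        + ") WITH ("
                        + "  'connector' = 'hbase-1.4',"
                        + "  'table-name' = 'mytable',"
                        + "  'zookeeper.quorum' = 'localhost:2181'"
                        + ")");
    }
}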
Use of org.apache.flink.connector.hbase2.sink.HBaseDynamicTableSink in project flink by apache.
From the class HBaseDynamicTableFactoryTest, method testTableSinkFactory.
@Test
public void testTableSinkFactory() {
    ResolvedSchema schema =
            ResolvedSchema.of(
                    Column.physical(ROWKEY, STRING()),
                    Column.physical(FAMILY1, ROW(FIELD(COL1, DOUBLE()), FIELD(COL2, INT()))),
                    Column.physical(FAMILY2, ROW(FIELD(COL1, INT()), FIELD(COL3, BIGINT()))),
                    Column.physical(FAMILY3, ROW(FIELD(COL2, BOOLEAN()), FIELD(COL3, STRING()))),
                    Column.physical(
                            FAMILY4,
                            ROW(
                                    FIELD(COL1, DECIMAL(10, 3)),
                                    FIELD(COL2, TIMESTAMP(3)),
                                    FIELD(COL3, DATE()),
                                    FIELD(COL4, TIME()))));
    DynamicTableSink sink = createTableSink(schema, getAllOptions());
    assertTrue(sink instanceof HBaseDynamicTableSink);
    HBaseDynamicTableSink hbaseSink = (HBaseDynamicTableSink) sink;

    // verify the derived HBaseTableSchema
    HBaseTableSchema hbaseSchema = hbaseSink.getHBaseTableSchema();
    assertEquals(0, hbaseSchema.getRowKeyIndex());
    assertEquals(Optional.of(STRING()), hbaseSchema.getRowKeyDataType());
    assertArrayEquals(new String[] {"f1", "f2", "f3", "f4"}, hbaseSchema.getFamilyNames());
    assertArrayEquals(new String[] {"c1", "c2"}, hbaseSchema.getQualifierNames("f1"));
    assertArrayEquals(new String[] {"c1", "c3"}, hbaseSchema.getQualifierNames("f2"));
    assertArrayEquals(new String[] {"c2", "c3"}, hbaseSchema.getQualifierNames("f3"));
    assertArrayEquals(new String[] {"c1", "c2", "c3", "c4"}, hbaseSchema.getQualifierNames("f4"));
    assertArrayEquals(new DataType[] {DOUBLE(), INT()}, hbaseSchema.getQualifierDataTypes("f1"));
    assertArrayEquals(new DataType[] {INT(), BIGINT()}, hbaseSchema.getQualifierDataTypes("f2"));
    assertArrayEquals(
            new DataType[] {BOOLEAN(), STRING()}, hbaseSchema.getQualifierDataTypes("f3"));
    assertArrayEquals(
            new DataType[] {DECIMAL(10, 3), TIMESTAMP(3), DATE(), TIME()},
            hbaseSchema.getQualifierDataTypes("f4"));

    // verify the Hadoop Configuration
    org.apache.hadoop.conf.Configuration expectedConfiguration =
            HBaseConfigurationUtil.getHBaseConfiguration();
    expectedConfiguration.set(HConstants.ZOOKEEPER_QUORUM, "localhost:2181");
    expectedConfiguration.set(HConstants.ZOOKEEPER_ZNODE_PARENT, "/flink");
    expectedConfiguration.set("hbase.security.authentication", "kerberos");
    org.apache.hadoop.conf.Configuration actualConfiguration = hbaseSink.getConfiguration();
    assertEquals(
            IteratorUtils.toList(expectedConfiguration.iterator()),
            IteratorUtils.toList(actualConfiguration.iterator()));

    // verify the table name
    assertEquals("testHBastTable", hbaseSink.getTableName());

    // verify the write options
    HBaseWriteOptions expectedWriteOptions =
            HBaseWriteOptions.builder()
                    .setBufferFlushMaxRows(1000)
                    .setBufferFlushIntervalMillis(1000)
                    .setBufferFlushMaxSizeInBytes(2 * 1024 * 1024)
                    .build();
    HBaseWriteOptions actualWriteOptions = hbaseSink.getWriteOptions();
    assertEquals(expectedWriteOptions, actualWriteOptions);
}
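The getAllOptions() helper is not reproduced on this page. A plausible reconstruction from the assertions above, with the connector identifier and exact keys being assumptions rather than verbatim source:

import java.util.HashMap;
import java.util.Map;

// Assumed helper, reverse-engineered from the assertions in testTableSinkFactory;
// treat every key and value here as illustrative.
private Map<String, String> getAllOptions() {
    Map<String, String> options = new HashMap<>();
    options.put("connector", "hbase-2.2");
    options.put("table-name", "testHBastTable");
    options.put("zookeeper.quorum", "localhost:2181");
    options.put("zookeeper.znode.parent", "/flink");
    // keys under "properties." are forwarded into the Hadoop Configuration
    options.put("properties.hbase.security.authentication", "kerberos");
    return options;
}

Note that the asserted write options (1000 rows, 1000 ms, 2 MB) match the connector's documented defaults, so the helper would not need to set any sink.buffer-flush.* entries.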
Use of org.apache.flink.connector.hbase2.sink.HBaseDynamicTableSink in project flink by apache.
From the class HBaseDynamicTableFactoryTest, method testBufferFlushOptions.
@Test
public void testBufferFlushOptions() {
    Map<String, String> options = getAllOptions();
    options.put("sink.buffer-flush.max-size", "10mb");
    options.put("sink.buffer-flush.max-rows", "100");
    options.put("sink.buffer-flush.interval", "10s");
    ResolvedSchema schema = ResolvedSchema.of(Column.physical(ROWKEY, STRING()));
    DynamicTableSink sink = createTableSink(schema, options);
    HBaseWriteOptions expected =
            HBaseWriteOptions.builder()
                    .setBufferFlushMaxRows(100)
                    .setBufferFlushIntervalMillis(10 * 1000)
                    .setBufferFlushMaxSizeInBytes(10 * 1024 * 1024)
                    .build();
    HBaseWriteOptions actual = ((HBaseDynamicTableSink) sink).getWriteOptions();
    assertEquals(expected, actual);
}
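The "10mb" string is converted to bytes before it reaches HBaseWriteOptions; the max-size option is declared as a memory type, so Flink's MemorySize handles the parsing with binary (1024-based) units. A standalone sketch of the arithmetic behind the expected 10 * 1024 * 1024, not the factory's actual code path:

import org.apache.flink.configuration.MemorySize;

public class MemorySizeSketch {
    public static void main(String[] args) {
        // "10mb" parses to 10 MiB, matching setBufferFlushMaxSizeInBytes above.
        long bytes = MemorySize.parse("10mb").getBytes();
        System.out.println(bytes == 10 * 1024 * 1024); // true
    }
}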
Use of org.apache.flink.connector.hbase2.sink.HBaseDynamicTableSink in project flink by apache.
From the class HBase2DynamicTableFactory, method createDynamicTableSink.
@Override
public DynamicTableSink createDynamicTableSink(Context context) {
    TableFactoryHelper helper = createTableFactoryHelper(this, context);
    helper.validateExcept(PROPERTIES_PREFIX);
    final ReadableConfig tableOptions = helper.getOptions();
    validatePrimaryKey(context.getPhysicalRowDataType(), context.getPrimaryKeyIndexes());
    String tableName = tableOptions.get(TABLE_NAME);
    Configuration hbaseConf = getHBaseConfiguration(tableOptions);
    HBaseWriteOptions hBaseWriteOptions = getHBaseWriteOptions(tableOptions);
    String nullStringLiteral = tableOptions.get(NULL_STRING_LITERAL);
    HBaseTableSchema hbaseSchema =
            HBaseTableSchema.fromDataType(context.getPhysicalRowDataType());
    return new HBaseDynamicTableSink(
            tableName, hbaseSchema, hbaseConf, hBaseWriteOptions, nullStringLiteral);
}
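helper.validateExcept(PROPERTIES_PREFIX) exempts keys under the properties.* prefix from option validation, and getHBaseConfiguration(tableOptions) then carries them, minus the prefix, into the Hadoop Configuration; that is how the kerberos entry in the test above gets through. A hypothetical sketch of that forwarding idea, not the connector's actual helper:

import java.util.Map;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

// Hypothetical: forward "properties.*" table options into an HBase client
// Configuration, dropping the prefix. Illustrates the idea only.
static Configuration toHBaseConf(Map<String, String> tableOptions) {
    Configuration conf = HBaseConfiguration.create();
    String prefix = "properties.";
    tableOptions.forEach(
            (key, value) -> {
                if (key.startsWith(prefix)) {
                    conf.set(key.substring(prefix.length()), value);
                }
            });
    return conf;
}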