Example usage of org.apache.flink.table.data.RowData in the Apache Flink project: class HBaseSerdeTest, method convertToReusedRowTest.
/**
 * Verifies that {@code convertToReusedRow} hands back the same {@link RowData}
 * instance across calls while still rendering the correct contents per input.
 */
@Test
public void convertToReusedRowTest() {
    HBaseSerde serde = createHBaseSerde();
    List<RowData> parsedRows = new ArrayList<>();
    List<String> renderedRows = new ArrayList<>();
    for (List<Cell> cells : prepareCells()) {
        RowData parsed = serde.convertToReusedRow(Result.create(cells));
        parsedRows.add(parsed);
        // Capture the textual form now, before the reused instance is overwritten.
        renderedRows.add(parsed.toString());
    }
    // Both iterations must return the identical (reused) RowData object.
    assertTrue(parsedRows.get(0) == parsedRows.get(1));
    List<String> expected = new ArrayList<>();
    expected.add("+I(1,+I(10),+I(Hello-1,100),+I(1.01,false,Welt-1))");
    expected.add("+I(2,+I(20),+I(Hello-2,200),+I(2.02,true,Welt-2))");
    assertEquals(expected, renderedRows);
}
Example usage of org.apache.flink.table.data.RowData in the Apache Flink project: class HBaseSerde, method createPutMutation.
/**
 * Converts the given {@link RowData} record into an HBase {@link Put} mutation (upsert).
 *
 * @param row the record to serialize; its configured row-key field must encode to a
 *     non-empty byte array
 * @return the populated {@link Put}, or {@code null} when the encoded row key is empty
 *     (such dirty records are dropped)
 */
@Nullable
public Put createPutMutation(RowData row) {
    checkArgument(keyEncoder != null, "row key is not set.");
    byte[] rowkey = keyEncoder.encode(row, rowkeyIndex);
    if (rowkey.length == 0) {
        // drop dirty records, rowkey shouldn't be zero length
        return null;
    }
    // upsert
    Put put = new Put(rowkey);
    for (int fieldPos = 0; fieldPos < fieldLength; fieldPos++) {
        if (fieldPos == rowkeyIndex) {
            // The row key itself is never written as a column.
            continue;
        }
        // Family index: fields positioned after the row key are shifted down by one.
        int familyIdx = fieldPos > rowkeyIndex ? fieldPos - 1 : fieldPos;
        byte[] familyKey = families[familyIdx];
        RowData familyRow = row.getRow(fieldPos, qualifiers[familyIdx].length);
        for (int q = 0; q < qualifiers[familyIdx].length; q++) {
            byte[] qualifier = qualifiers[familyIdx][q];
            // Serialize the column value with its per-qualifier encoder.
            byte[] value = qualifierEncoders[familyIdx][q].encode(familyRow, q);
            put.addColumn(familyKey, qualifier, value);
        }
    }
    return put;
}
Example usage of org.apache.flink.table.data.RowData in the Apache Flink project: class HBaseRowDataLookupFunction, method eval.
/**
 * The invoke entry point of lookup function.
 *
 * <p>Serves the row from the cache when present; otherwise queries HBase, retrying
 * transient I/O failures up to {@code maxRetryTimes} with a linear backoff.
 *
 * @param rowKey the lookup key. Currently only support single rowkey.
 * @throws IOException declared for the lookup contract; I/O failures are retried and,
 *     once exhausted, rethrown wrapped in a {@link RuntimeException}
 */
public void eval(Object rowKey) throws IOException {
    if (cache != null) {
        RowData cacheRowData = cache.getIfPresent(rowKey);
        if (cacheRowData != null) {
            collect(cacheRowData);
            return;
        }
    }
    for (int retry = 0; retry <= maxRetryTimes; retry++) {
        try {
            // fetch result
            Get get = serde.createGet(rowKey);
            if (get != null) {
                Result result = table.get(get);
                if (!result.isEmpty()) {
                    if (cache != null) {
                        // Parse into a fresh row so the cached instance is never
                        // mutated by later reused-row conversions.
                        RowData rowData = serde.convertToNewRow(result);
                        collect(rowData);
                        cache.put(rowKey, rowData);
                    } else {
                        collect(serde.convertToReusedRow(result));
                    }
                }
            }
            break;
        } catch (IOException e) {
            // Parameterized logging: no eager String.format on every failure.
            LOG.error("HBase lookup error, retry times = {}", retry, e);
            if (retry >= maxRetryTimes) {
                throw new RuntimeException("Execution of HBase lookup failed.", e);
            }
            try {
                // Linear backoff (the first retry intentionally sleeps 0 ms).
                Thread.sleep(1000L * retry);
            } catch (InterruptedException e1) {
                // Restore the interrupt flag so callers can observe the interruption.
                Thread.currentThread().interrupt();
                throw new RuntimeException(e1);
            }
        }
    }
}
Example usage of org.apache.flink.table.data.RowData in the Apache Flink project: class RegistryAvroRowDataSeDeSchemaTest, method testRowDataWriteReadWithSchema.
/**
 * Serializes {@code address} under the row type derived from {@code schema}, deserializes
 * it back with the converted writer schema, and asserts the round trip preserves the
 * field values.
 */
private void testRowDataWriteReadWithSchema(Schema schema) throws Exception {
    DataType dataType = AvroSchemaConverter.convertToDataType(schema.toString());
    RowType rowType = (RowType) dataType.getLogicalType();
    Schema writerSchema = AvroSchemaConverter.convertToSchema(dataType.getLogicalType());
    AvroRowDataSerializationSchema serializer = getSerializationSchema(rowType, schema);
    AvroRowDataDeserializationSchema deserializer =
            getDeserializationSchema(rowType, writerSchema);
    serializer.open(null);
    deserializer.open(null);
    // A null payload must deserialize to null rather than throw.
    assertNull(deserializer.deserialize(null));
    byte[] serializedBytes = serializer.serialize(address2RowData(address));
    RowData roundTripped = deserializer.deserialize(serializedBytes);
    assertThat(roundTripped.getArity(), equalTo(schema.getFields().size()));
    assertEquals(address.getNum(), roundTripped.getInt(0));
    assertEquals(address.getStreet(), roundTripped.getString(1).toString());
    if (schema != ADDRESS_SCHEMA_COMPATIBLE) {
        // The compatible schema drops the trailing fields, so only check them otherwise.
        assertEquals(address.getCity(), roundTripped.getString(2).toString());
        assertEquals(address.getState(), roundTripped.getString(3).toString());
        assertEquals(address.getZip(), roundTripped.getString(4).toString());
    }
}
Example usage of org.apache.flink.table.data.RowData in the Apache Flink project: class RegistryAvroFormatFactoryTest, method testDeserializationSchema.
/**
 * Checks that the factory-created table source exposes a runtime deserialization schema
 * equal to the expected Confluent-registry Avro schema for {@code ROW_TYPE}.
 */
@Test
public void testDeserializationSchema() {
    final AvroRowDataDeserializationSchema expectedDeser =
            new AvroRowDataDeserializationSchema(
                    ConfluentRegistryAvroDeserializationSchema.forGeneric(
                            AvroSchemaConverter.convertToSchema(ROW_TYPE), REGISTRY_URL),
                    AvroToRowDataConverters.createRowConverter(ROW_TYPE),
                    InternalTypeInfo.of(ROW_TYPE));

    final DynamicTableSource actualSource = createTableSource(SCHEMA, getDefaultOptions());
    assertThat(actualSource, instanceOf(TestDynamicTableFactory.DynamicTableSourceMock.class));
    TestDynamicTableFactory.DynamicTableSourceMock sourceMock =
            (TestDynamicTableFactory.DynamicTableSourceMock) actualSource;

    DeserializationSchema<RowData> actualDeser =
            sourceMock.valueFormat.createRuntimeDecoder(
                    ScanRuntimeProviderContext.INSTANCE, SCHEMA.toPhysicalRowDataType());
    assertEquals(expectedDeser, actualDeser);
}
Aggregations