Use of org.apache.flink.table.data.GenericRowData in project flink by apache.
Class OggJsonDeserializationSchema, method emitRow.
// --------------------------------------------------------------------------------------------

private void emitRow(
        GenericRowData rootRow, GenericRowData physicalRow, Collector<RowData> out) {
    // shortcut in case no output projection is required
    if (!hasMetadata) {
        out.collect(physicalRow);
        return;
    }
    final int physicalArity = physicalRow.getArity();
    final int metadataArity = metadataConverters.length;
    final GenericRowData producedRow =
            new GenericRowData(physicalRow.getRowKind(), physicalArity + metadataArity);
    for (int physicalPos = 0; physicalPos < physicalArity; physicalPos++) {
        producedRow.setField(physicalPos, physicalRow.getField(physicalPos));
    }
    for (int metadataPos = 0; metadataPos < metadataArity; metadataPos++) {
        producedRow.setField(
                physicalArity + metadataPos, metadataConverters[metadataPos].convert(rootRow));
    }
    out.collect(producedRow);
}
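emitRow appends one value per metadata converter behind the physical columns of the produced row. The converter type is not shown in this snippet; a minimal sketch of the shape it would need to have (the interface definition and the timestamp position are assumptions for illustration, not the exact Flink declarations):

    interface MetadataConverter extends Serializable {
        Object convert(GenericRowData rootRow);
    }

    // hypothetical converter exposing an ingestion timestamp assumed to sit at
    // position 3 of the parsed root row (position and precision are made up)
    MetadataConverter ingestionTimestamp = rootRow -> rootRow.getTimestamp(3, 3);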
Use of org.apache.flink.table.data.GenericRowData in project flink by apache.
Class OggJsonDeserializationSchema, method deserialize.
@Override
public void deserialize(byte[] message, Collector<RowData> out) throws IOException {
    if (message == null || message.length == 0) {
        // skip tombstone messages
        return;
    }
    try {
        GenericRowData row = (GenericRowData) jsonDeserializer.deserialize(message);
        GenericRowData before = (GenericRowData) row.getField(0);
        GenericRowData after = (GenericRowData) row.getField(1);
        String op = row.getField(2).toString();
        if (OP_CREATE.equals(op)) {
            after.setRowKind(RowKind.INSERT);
            emitRow(row, after, out);
        } else if (OP_UPDATE.equals(op)) {
            if (before == null) {
                throw new IllegalStateException(
                        String.format(REPLICA_IDENTITY_EXCEPTION, "UPDATE"));
            }
            before.setRowKind(RowKind.UPDATE_BEFORE);
            after.setRowKind(RowKind.UPDATE_AFTER);
            emitRow(row, before, out);
            emitRow(row, after, out);
        } else if (OP_DELETE.equals(op)) {
            if (before == null) {
                throw new IllegalStateException(
                        String.format(REPLICA_IDENTITY_EXCEPTION, "DELETE"));
            }
            before.setRowKind(RowKind.DELETE);
            emitRow(row, before, out);
        } else {
            if (!ignoreParseErrors) {
                throw new IOException(
                        format(
                                "Unknown \"op_type\" value \"%s\". The Ogg JSON message is '%s'",
                                op, new String(message)));
            }
        }
    } catch (Throwable t) {
        // a big try/catch to protect the processing
        if (!ignoreParseErrors) {
            throw new IOException(
                    format("Corrupt Ogg JSON message '%s'.", new String(message)), t);
        }
    }
}
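Depending on op_type, the method emits zero, one, or two rows per message. A quick way to exercise it in a test is to collect the emitted rows into a list via Flink's org.apache.flink.api.common.functions.util.ListCollector; a minimal sketch, where schema and oggJsonBytes stand for an initialized OggJsonDeserializationSchema and a serialized message:

    List<RowData> emitted = new ArrayList<>();
    schema.deserialize(oggJsonBytes, new ListCollector<>(emitted));
    // for an update envelope, 'emitted' now holds an UPDATE_BEFORE row
    // followed by the matching UPDATE_AFTER row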
Use of org.apache.flink.table.data.GenericRowData in project flink by apache.
Class HiveInputFormatPartitionReaderITCase, method testReadFormat.
private void testReadFormat(TableEnvironment tableEnv, HiveCatalog hiveCatalog, String format)
        throws Exception {
    String tableName = prepareData(tableEnv, format);
    ObjectPath tablePath = new ObjectPath("default", tableName);
    TableSchema tableSchema = hiveCatalog.getTable(tablePath).getSchema();
    // create partition reader
    HiveInputFormatPartitionReader partitionReader =
            new HiveInputFormatPartitionReader(
                    new Configuration(),
                    new JobConf(hiveCatalog.getHiveConf()),
                    hiveCatalog.getHiveVersion(),
                    tablePath,
                    tableSchema.getFieldDataTypes(),
                    tableSchema.getFieldNames(),
                    Collections.emptyList(),
                    null,
                    false);
    Table hiveTable = hiveCatalog.getHiveTable(tablePath);
    // create HiveTablePartition to read from
    HiveTablePartition tablePartition =
            new HiveTablePartition(
                    hiveTable.getSd(),
                    HiveReflectionUtils.getTableMetadata(
                            HiveShimLoader.loadHiveShim(hiveCatalog.getHiveVersion()),
                            hiveTable));
    partitionReader.open(Collections.singletonList(tablePartition));
    GenericRowData reuse = new GenericRowData(tableSchema.getFieldCount());
    int count = 0;
    // this follows the way the partition reader is used during lookup join
    while (partitionReader.read(reuse) != null) {
        count++;
    }
    assertEquals(
            CollectionUtil.iteratorToList(
                            tableEnv.executeSql("select * from " + tableName).collect())
                    .size(),
            count);
}
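Note that read(reuse) refills the same GenericRowData instance on every call, which is why the test only counts rows instead of keeping them. A caller that wants to retain values must copy them out before the next read; a minimal sketch, assuming the first column is a STRING field (the column choice is an assumption for illustration):

    List<String> firstColumn = new ArrayList<>();
    GenericRowData row = new GenericRowData(tableSchema.getFieldCount());
    while (partitionReader.read(row) != null) {
        // copy the value out of the reused row, not the row itself
        firstColumn.add(row.getString(0).toString());
    }
    partitionReader.close();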
Use of org.apache.flink.table.data.GenericRowData in project flink by apache.
Class DebeziumAvroDeserializationSchema, method deserialize.
@Override
public void deserialize(byte[] message, Collector<RowData> out) throws IOException {
    if (message == null || message.length == 0) {
        // skip tombstone messages
        return;
    }
    try {
        GenericRowData row = (GenericRowData) avroDeserializer.deserialize(message);
        GenericRowData before = (GenericRowData) row.getField(0);
        GenericRowData after = (GenericRowData) row.getField(1);
        String op = row.getField(2).toString();
        if (OP_CREATE.equals(op) || OP_READ.equals(op)) {
            after.setRowKind(RowKind.INSERT);
            out.collect(after);
        } else if (OP_UPDATE.equals(op)) {
            if (before == null) {
                throw new IllegalStateException(
                        String.format(REPLICA_IDENTITY_EXCEPTION, "UPDATE"));
            }
            before.setRowKind(RowKind.UPDATE_BEFORE);
            after.setRowKind(RowKind.UPDATE_AFTER);
            out.collect(before);
            out.collect(after);
        } else if (OP_DELETE.equals(op)) {
            if (before == null) {
                throw new IllegalStateException(
                        String.format(REPLICA_IDENTITY_EXCEPTION, "DELETE"));
            }
            before.setRowKind(RowKind.DELETE);
            out.collect(before);
        } else {
            throw new IOException(
                    format(
                            "Unknown \"op\" value \"%s\". The Debezium Avro message is '%s'",
                            op, new String(message)));
        }
    } catch (Throwable t) {
        // a big try/catch to protect the processing
        throw new IOException("Can't deserialize Debezium Avro message.", t);
    }
}
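The op codes compared above follow Debezium's documented change-event envelope. The constants would be defined along these lines; the single-letter values are Debezium's published codes, while the exact constant declarations are a sketch:

    private static final String OP_READ = "r";   // snapshot read
    private static final String OP_CREATE = "c"; // insert
    private static final String OP_UPDATE = "u"; // update
    private static final String OP_DELETE = "d"; // delete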
Use of org.apache.flink.table.data.GenericRowData in project flink by apache.
Class OrcBulkRowDataWriterTest, method getResults.
private static List<RowData> getResults(Reader reader) throws IOException {
    List<RowData> results = new ArrayList<>();
    RecordReader recordReader = reader.rows();
    VectorizedRowBatch batch = reader.getSchema().createRowBatch();
    while (recordReader.nextBatch(batch)) {
        BytesColumnVector stringVector = (BytesColumnVector) batch.cols[0];
        LongColumnVector intVector = (LongColumnVector) batch.cols[1];
        ListColumnVector listVector = (ListColumnVector) batch.cols[2];
        MapColumnVector mapVector = (MapColumnVector) batch.cols[3];
        for (int r = 0; r < batch.size; r++) {
            GenericRowData readRowData = new GenericRowData(4);
            readRowData.setField(0, readStringData(stringVector, r));
            readRowData.setField(1, readInt(intVector, r));
            readRowData.setField(2, readList(listVector, r));
            readRowData.setField(3, readMap(mapVector, r));
            results.add(readRowData);
        }
    }
    // close after draining all batches; closing inside the loop would call
    // nextBatch() on an already closed reader
    recordReader.close();
    return results;
}
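The helpers readStringData, readInt, readList, and readMap are not shown in this snippet. A minimal sketch of the two scalar ones, assuming non-null values (only the names come from the snippet; the bodies are illustrative, and real helpers would also need to consult the vectors' null flags):

    private static StringData readStringData(BytesColumnVector vector, int row) {
        // repeating vectors store a single value at index 0
        int r = vector.isRepeating ? 0 : row;
        return StringData.fromBytes(vector.vector[r], vector.start[r], vector.length[r]);
    }

    private static int readInt(LongColumnVector vector, int row) {
        int r = vector.isRepeating ? 0 : row;
        return (int) vector.vector[r];
    }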