Use of org.apache.flink.table.data.GenericRowData in project flink by apache.

The class BatchArrowPythonGroupWindowAggregateFunctionOperator, method open.

@Override
public void open() throws Exception {
    super.open();
    inputKeyAndWindow = new LinkedList<>();
    windowProperty = new GenericRowData(namedProperties.length);
    windowAggResult = new JoinedRowData();
    windowsGrouping =
            new HeapWindowsGrouping(maxLimitSize, windowSize, slideSize, inputTimeFieldIndex, false);
    forwardedInputSerializer = new RowDataSerializer(inputType);
}
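For context, windowProperty is later filled with window metadata and joined onto each aggregate result via windowAggResult. A minimal sketch of that pattern, assuming a hypothetical two-property layout and Flink's TimestampData API; windowStart, windowEnd, aggResult, and rowDataWrapper are assumed stand-ins, not the operator's actual fields:

// Hypothetical sketch: fill a GenericRowData with window properties.
GenericRowData windowProperty = new GenericRowData(2);
windowProperty.setField(0, TimestampData.fromEpochMillis(windowStart)); // window start
windowProperty.setField(1, TimestampData.fromEpochMillis(windowEnd));   // window end
// JoinedRowData concatenates the aggregate result with the property row.
rowDataWrapper.collect(windowAggResult.replace(aggResult, windowProperty));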
Use of org.apache.flink.table.data.GenericRowData in project flink by apache.

The class PythonTableFunctionOperator, method emitResult.

@Override
@SuppressWarnings("ConstantConditions")
public void emitResult(Tuple2<byte[], Integer> resultTuple) throws Exception {
    byte[] rawUdtfResult;
    int length;
    if (isFinishResult) {
        input = forwardedInputQueue.poll();
        hasJoined = false;
    }
    do {
        rawUdtfResult = resultTuple.f0;
        length = resultTuple.f1;
        isFinishResult = isFinishResult(rawUdtfResult, length);
        if (!isFinishResult) {
            reuseJoinedRow.setRowKind(input.getRowKind());
            bais.setBuffer(rawUdtfResult, 0, rawUdtfResult.length);
            RowData udtfResult = udtfOutputTypeSerializer.deserialize(baisWrapper);
            rowDataWrapper.collect(reuseJoinedRow.replace(input, udtfResult));
            resultTuple = pythonFunctionRunner.pollResult();
            hasJoined = true;
        } else if (joinType == FlinkJoinType.LEFT && !hasJoined) {
            GenericRowData udtfResult = new GenericRowData(udfOutputType.getFieldCount());
            for (int i = 0; i < udtfResult.getArity(); i++) {
                udtfResult.setField(i, null);
            }
            rowDataWrapper.collect(reuseJoinedRow.replace(input, udtfResult));
        }
    } while (!isFinishResult && resultTuple != null);
}
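Note that GenericRowData backs its fields with an Object[] whose entries default to null, so the explicit null-filling loop above is defensive rather than strictly required. The LEFT-join padding pattern, as a minimal sketch (reuseJoinedRow, input, and rowDataWrapper are the operator's fields from the snippet above):

// A freshly constructed GenericRowData already reports null for every field.
GenericRowData nullPadding = new GenericRowData(3);
assert nullPadding.isNullAt(0) && nullPadding.isNullAt(2);
// For an unmatched input row under a LEFT join, emit the input joined with nulls.
rowDataWrapper.collect(reuseJoinedRow.replace(input, nullPadding));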
Use of org.apache.flink.table.data.GenericRowData in project flink by apache.

The class AbstractJdbcRowConverter, method toInternal.

@Override
public RowData toInternal(ResultSet resultSet) throws SQLException {
    GenericRowData genericRowData = new GenericRowData(rowType.getFieldCount());
    for (int pos = 0; pos < rowType.getFieldCount(); pos++) {
        Object field = resultSet.getObject(pos + 1);
        genericRowData.setField(pos, toInternalConverters[pos].deserialize(field));
    }
    return genericRowData;
}
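Each entry of toInternalConverters turns one JDBC column value into Flink's internal data format. A minimal sketch of two such converters, assuming the connector's JdbcDeserializationConverter functional interface (Object in, internal Object out):

// VARCHAR: JDBC returns java.lang.String; Flink's internal format is StringData.
JdbcDeserializationConverter varcharConverter = val -> StringData.fromString((String) val);
// INT: the boxed Integer is already the internal representation and passes through.
JdbcDeserializationConverter intConverter = val -> val;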
Use of org.apache.flink.table.data.GenericRowData in project flink by apache.

The class JdbcRowDataInputFormatTest, method testJdbcInputFormatWithParallelismAndNumericColumnSplitting.

@Test
public void testJdbcInputFormatWithParallelismAndNumericColumnSplitting() throws IOException {
    final int fetchSize = 1;
    final long min = TEST_DATA[0].id;
    final long max = TEST_DATA[TEST_DATA.length - fetchSize].id;
    JdbcParameterValuesProvider pramProvider =
            new JdbcNumericBetweenParametersProvider(min, max).ofBatchSize(fetchSize);
    inputFormat =
            JdbcRowDataInputFormat.builder()
                    .setDrivername(DERBY_EBOOKSHOP_DB.getDriverClass())
                    .setDBUrl(DERBY_EBOOKSHOP_DB.getUrl())
                    .setQuery(SELECT_ALL_BOOKS_SPLIT_BY_ID)
                    .setParametersProvider(pramProvider)
                    .setResultSetType(ResultSet.TYPE_SCROLL_INSENSITIVE)
                    .setRowConverter(dialect.getRowConverter(rowType))
                    .build();
    inputFormat.openInputFormat();
    InputSplit[] splits = inputFormat.createInputSplits(1);
    // this query exploits parallelism (1 split for every id)
    Assert.assertEquals(TEST_DATA.length, splits.length);
    int recordCount = 0;
    RowData row = new GenericRowData(5);
    for (InputSplit split : splits) {
        inputFormat.open(split);
        while (!inputFormat.reachedEnd()) {
            RowData next = inputFormat.nextRecord(row);
            assertEquals(TEST_DATA[recordCount], next);
            recordCount++;
        }
        inputFormat.close();
    }
    inputFormat.closeInputFormat();
    Assert.assertEquals(TEST_DATA.length, recordCount);
}
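The split-count assertion follows from the parameter provider: with a batch size of 1, every id gets its own [start, end] range, and the input format creates one split per range. A minimal sketch, assuming ids 1001..1003:

// Batch size 1 over ids 1001..1003 yields three parameter pairs,
// [1001, 1001], [1002, 1002], [1003, 1003] -- one input split per id.
Serializable[][] params =
        new JdbcNumericBetweenParametersProvider(1001L, 1003L).ofBatchSize(1).getParameterValues();
assert params.length == 3;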
Use of org.apache.flink.table.data.GenericRowData in project flink by apache.

The class JsonRowDataSerDeSchemaTest, method testSerializationWithTypesMismatch.

@Test
public void testSerializationWithTypesMismatch() {
    RowType rowType = (RowType) ROW(FIELD("f0", INT()), FIELD("f1", STRING())).getLogicalType();
    GenericRowData genericRowData = new GenericRowData(2);
    genericRowData.setField(0, 1);
    genericRowData.setField(1, 1);
    JsonRowDataSerializationSchema serializationSchema =
            new JsonRowDataSerializationSchema(
                    rowType, TimestampFormat.SQL, JsonFormatOptions.MapNullKeyMode.FAIL, "null", true);
    String errorMessage = "Fail to serialize at field: f1.";
    try {
        serializationSchema.serialize(genericRowData);
        fail("expecting exception message: " + errorMessage);
    } catch (Throwable t) {
        assertThat(t, FlinkMatchers.containsMessage(errorMessage));
    }
}
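The mismatch is deliberate: f1 is declared STRING, whose internal representation is StringData, but the test stores a java.lang.Integer, so serialization fails at that field. The type-correct assignment, as a minimal sketch:

GenericRowData row = new GenericRowData(2);
row.setField(0, 1);                          // INT maps to java.lang.Integer
row.setField(1, StringData.fromString("1")); // STRING maps to StringData, not Integer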