Use of org.apache.flink.table.data.RowData in project flink by apache.
Class BatchArrowPythonGroupAggregateFunctionOperator, method emitResult.
@Override
@SuppressWarnings("ConstantConditions")
public void emitResult(Tuple2<byte[], Integer> resultTuple) throws Exception {
    byte[] udafResult = resultTuple.f0;
    int length = resultTuple.f1;
    bais.setBuffer(udafResult, 0, length);
    int rowCount = arrowSerializer.load();
    for (int i = 0; i < rowCount; i++) {
        RowData key = forwardedInputQueue.poll();
        reuseJoinedRow.setRowKind(key.getRowKind());
        RowData result = arrowSerializer.read(i);
        rowDataWrapper.collect(reuseJoinedRow.replace(key, result));
    }
    arrowSerializer.resetReader();
}
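The method pairs each buffered grouping key with the corresponding UDAF result row by wrapping both in a reused JoinedRowData. Below is a minimal standalone sketch of that pattern, not the operator's actual code: the key and result values are made up, and the JoinedRowData import path shown is the one used by recent Flink versions.

import org.apache.flink.table.data.GenericRowData;
import org.apache.flink.table.data.RowData;
import org.apache.flink.table.data.StringData;
import org.apache.flink.table.data.utils.JoinedRowData;

public class JoinedRowDataSketch {
    public static void main(String[] args) {
        // Hypothetical key row and aggregate-result row.
        RowData key = GenericRowData.of(StringData.fromString("user-1"));
        RowData result = GenericRowData.of(42L);

        // JoinedRowData exposes the two backing rows as one logical row,
        // which is what reuseJoinedRow.replace(key, result) does above.
        JoinedRowData joined = new JoinedRowData();
        joined.replace(key, result);
        joined.setRowKind(key.getRowKind());

        System.out.println(joined.getString(0)); // field 0 comes from the key row
        System.out.println(joined.getLong(1));   // field 1 comes from the result row
    }
}

Reusing one JoinedRowData instance per operator avoids allocating a new output row for every emitted record.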
Use of org.apache.flink.table.data.RowData in project flink by apache.
Class BatchArrowPythonOverWindowAggregateFunctionOperator, method emitResult.
@Override
@SuppressWarnings("ConstantConditions")
public void emitResult(Tuple2<byte[], Integer> resultTuple) throws Exception {
    byte[] udafResult = resultTuple.f0;
    int length = resultTuple.f1;
    bais.setBuffer(udafResult, 0, length);
    int rowCount = arrowSerializer.load();
    for (int i = 0; i < rowCount; i++) {
        RowData input = forwardedInputQueue.poll();
        lastKeyDataStartPos--;
        reuseJoinedRow.setRowKind(input.getRowKind());
        rowDataWrapper.collect(reuseJoinedRow.replace(input, arrowSerializer.read(i)));
    }
    arrowSerializer.resetReader();
}
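Here the changelog kind of the buffered input row is copied onto the reused output row before it is emitted, so INSERT/DELETE/UPDATE semantics survive the round trip through the Python UDAF. A small illustrative sketch of that RowKind propagation, using hypothetical field values:

import org.apache.flink.table.data.GenericRowData;
import org.apache.flink.table.data.RowData;
import org.apache.flink.types.RowKind;

public class RowKindPropagation {
    public static void main(String[] args) {
        // Hypothetical buffered input row carrying an UPDATE_AFTER change.
        GenericRowData input = GenericRowData.of(1, 2L);
        input.setRowKind(RowKind.UPDATE_AFTER);

        // Output row: input fields plus an aggregate value; kind is copied over.
        GenericRowData output = GenericRowData.of(1, 2L, 3.0d);
        output.setRowKind(input.getRowKind());

        System.out.println(output.getRowKind()); // UPDATE_AFTER
    }
}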
Use of org.apache.flink.table.data.RowData in project flink by apache.
Class AbstractStreamArrowPythonBoundedRowsOperator, method emitResult.
@Override
@SuppressWarnings("ConstantConditions")
public void emitResult(Tuple2<byte[], Integer> resultTuple) throws Exception {
    byte[] udafResult = resultTuple.f0;
    int length = resultTuple.f1;
    bais.setBuffer(udafResult, 0, length);
    int rowCount = arrowSerializer.load();
    for (int i = 0; i < rowCount; i++) {
        RowData data = arrowSerializer.read(i);
        RowData key = forwardedInputQueue.poll();
        reuseJoinedRow.setRowKind(key.getRowKind());
        rowDataWrapper.collect(reuseJoinedRow.replace(key, data));
    }
    arrowSerializer.resetReader();
}
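As in the other operators, forwardedInputQueue is drained in FIFO order, so the i-th deserialized Arrow row is paired with the i-th buffered input. A rough standalone sketch of that queue pairing, with made-up inputs and results:

import java.util.ArrayDeque;
import java.util.Deque;

import org.apache.flink.table.data.GenericRowData;
import org.apache.flink.table.data.RowData;

public class ForwardedInputQueueSketch {
    public static void main(String[] args) {
        // Inputs are buffered while the asynchronous UDAF call is in flight.
        Deque<RowData> forwardedInputQueue = new ArrayDeque<>();
        forwardedInputQueue.add(GenericRowData.of(1));
        forwardedInputQueue.add(GenericRowData.of(2));

        // Results arrive later, in the same order; exactly one poll() per emitted row.
        RowData[] results = {GenericRowData.of(10L), GenericRowData.of(20L)};
        for (RowData result : results) {
            RowData input = forwardedInputQueue.poll();
            System.out.println(input.getInt(0) + " -> " + result.getLong(0));
        }
    }
}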
Use of org.apache.flink.table.data.RowData in project flink by apache.
Class AbstractStreamArrowPythonBoundedRowsOperator, method triggerWindowProcess.
void triggerWindowProcess(List<RowData> inputs, int i, int index) throws Exception {
    if (windowData.isEmpty()) {
        if (i >= lowerBoundary) {
            for (int j = (int) (i - lowerBoundary); j <= i; j++) {
                windowData.add(inputs.get(j));
            }
            currentBatchCount += lowerBoundary;
        } else {
            for (int j = 0; j <= i; j++) {
                RowData rowData = inputs.get(j);
                windowData.add(rowData);
                currentBatchCount++;
            }
            Long previousTimestamp;
            List<RowData> previousData;
            int length;
            long remainingDataCount = lowerBoundary - i;
            ListIterator<Long> iter = sortedTimestamps.listIterator(index);
            while (remainingDataCount > 0 && iter.hasPrevious()) {
                previousTimestamp = iter.previous();
                previousData = inputState.get(previousTimestamp);
                length = previousData.size();
                ListIterator<RowData> previousDataIter = previousData.listIterator(length);
                while (previousDataIter.hasPrevious() && remainingDataCount > 0) {
                    windowData.addFirst(previousDataIter.previous());
                    remainingDataCount--;
                    currentBatchCount++;
                }
            }
            // clear outdated data.
            while (iter.hasPrevious()) {
                previousTimestamp = iter.previous();
                inputState.remove(previousTimestamp);
            }
        }
    } else {
        if (windowData.size() > lowerBoundary) {
            windowData.pop();
        }
        windowData.add(inputs.get(i));
        currentBatchCount += windowData.size();
    }
    for (RowData rowData : windowData) {
        arrowSerializer.write(getFunctionInput(rowData));
    }
    invokeCurrentBatch();
}
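Once windowData is non-empty, the method behaves like a row-count sliding window: when the buffer already holds more than lowerBoundary rows, the oldest row is dropped before the current row is appended. A simplified sketch of just that buffering step, where lowerBoundary and the row contents are hypothetical:

import java.util.LinkedList;

import org.apache.flink.table.data.GenericRowData;
import org.apache.flink.table.data.RowData;

public class BoundedRowsWindowSketch {
    public static void main(String[] args) {
        long lowerBoundary = 2; // e.g. "2 PRECEDING" in OVER-window terms (hypothetical value)
        LinkedList<RowData> windowData = new LinkedList<>();

        for (int i = 0; i < 5; i++) {
            if (windowData.size() > lowerBoundary) {
                windowData.removeFirst(); // corresponds to windowData.pop() above
            }
            windowData.add(GenericRowData.of(i));
            System.out.println("window size after row " + i + ": " + windowData.size());
        }
    }
}

The window therefore never grows beyond lowerBoundary + 1 rows (the preceding rows plus the current one) before the buffered rows are serialized and sent to the Python UDAF.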
Use of org.apache.flink.table.data.RowData in project flink by apache.
Class RowWriter, method doWrite.
@Override
public void doWrite(T in, int ordinal) {
    RowData row;
    if (isNullAt(in, ordinal)) {
        row = nullRow;
        ((StructVector) getValueVector()).setNull(getCount());
    } else {
        row = readRow(in, ordinal);
        ((StructVector) getValueVector()).setIndexDefined(getCount());
    }
    for (int i = 0; i < fieldsWriters.length; i++) {
        fieldsWriters[i].write(row, i);
    }
}
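The writer either marks the Arrow struct slot null or writes each field of the resolved nested row. The null-versus-nested-row contract it relies on can be illustrated directly on the RowData interface; the field values below are made up for the sketch.

import org.apache.flink.table.data.GenericRowData;
import org.apache.flink.table.data.RowData;

public class NestedRowSketch {
    public static void main(String[] args) {
        // Outer row with one nested row field and one null row field.
        RowData nested = GenericRowData.of(7, 8L);
        RowData outer = GenericRowData.of(nested, null);

        if (!outer.isNullAt(0)) {
            RowData inner = outer.getRow(0, 2); // 2 = number of fields in the nested row
            System.out.println(inner.getInt(0) + ", " + inner.getLong(1));
        }
        System.out.println("field 1 is null: " + outer.isNullAt(1));
    }
}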