Example usage of org.apache.flink.table.data.utils.JoinedRowData in the Apache Flink project:
the open method of the RowTimeRowsBoundedPrecedingFunction class.
/**
 * Initializes the aggregation handler, keyed state, cleanup timer state, and metrics
 * before any element is processed.
 */
@Override
public void open(Configuration parameters) throws Exception {
    // Instantiate the code-generated aggregation handler and wire it to per-key state views.
    function = genAggsHandler.newInstance(getRuntimeContext().getUserCodeClassLoader());
    function.open(new PerKeyStateDataViewStore(getRuntimeContext()));

    output = new JoinedRowData();

    // Timestamp of the last timer that fired for the current key.
    lastTriggeringTsState =
            getRuntimeContext()
                    .getState(new ValueStateDescriptor<>("lastTriggeringTsState", Types.LONG));

    // Count of rows processed so far for the current key.
    counterState =
            getRuntimeContext()
                    .getState(new ValueStateDescriptor<>("processedCountState", Types.LONG));

    // Accumulator row for the aggregate functions.
    InternalTypeInfo<RowData> accTypeInfo = InternalTypeInfo.ofFields(accTypes);
    accState =
            getRuntimeContext().getState(new ValueStateDescriptor<>("accState", accTypeInfo));

    // Input elements are all binary rows since they arrive from the network;
    // buffer them per row-time timestamp.
    InternalTypeInfo<RowData> rowType = InternalTypeInfo.ofFields(inputFieldTypes);
    ListTypeInfo<RowData> rowListType = new ListTypeInfo<>(rowType);
    inputState =
            getRuntimeContext()
                    .getMapState(new MapStateDescriptor<>("inputState", Types.LONG, rowListType));

    initCleanupTimeState("RowTimeBoundedRowsOverCleanupTime");

    // metrics
    this.numLateRecordsDropped =
            getRuntimeContext().getMetricGroup().counter(LATE_ELEMENTS_DROPPED_METRIC_NAME);
}
Example usage of org.apache.flink.table.data.utils.JoinedRowData in the Apache Flink project:
the open method of the StreamingJoinOperator class.
/**
 * Sets up the reusable output row, the null-padding rows for outer joins, and the
 * record state views for both input sides.
 */
@Override
public void open() throws Exception {
    super.open();

    this.outRow = new JoinedRowData();
    this.leftNullRow = new GenericRowData(leftType.toRowSize());
    this.rightNullRow = new GenericRowData(rightType.toRowSize());

    // initialize states: outer sides use views that also track association counts.
    this.leftRecordStateView =
            leftIsOuter
                    ? OuterJoinRecordStateViews.create(
                            getRuntimeContext(),
                            "left-records",
                            leftInputSideSpec,
                            leftType,
                            stateRetentionTime)
                    : JoinRecordStateViews.create(
                            getRuntimeContext(),
                            "left-records",
                            leftInputSideSpec,
                            leftType,
                            stateRetentionTime);
    this.rightRecordStateView =
            rightIsOuter
                    ? OuterJoinRecordStateViews.create(
                            getRuntimeContext(),
                            "right-records",
                            rightInputSideSpec,
                            rightType,
                            stateRetentionTime)
                    : JoinRecordStateViews.create(
                            getRuntimeContext(),
                            "right-records",
                            rightInputSideSpec,
                            rightType,
                            stateRetentionTime);
}
Example usage of org.apache.flink.table.data.utils.JoinedRowData in the Apache Flink project:
the processCurrentData method of the BufferDataOverWindowOperator class.
/**
 * Materializes the buffered input rows and emits one output row per input row, where each
 * over-window frame's result is joined onto the accumulated output via {@code joinedRows}.
 *
 * <p>Fix: the buffer iterator is now closed in a {@code finally} block, so its resources
 * are released even when {@code frame.process} or {@code collector.collect} throws.
 *
 * @throws Exception if frame preparation, frame processing, or downstream collection fails
 */
private void processCurrentData() throws Exception {
    currentData.complete();

    // Give every frame a view of the complete buffered partition before row-by-row work.
    for (OverWindowFrame frame : overWindowFrames) {
        frame.prepare(currentData);
    }

    int rowIndex = 0;
    ResettableExternalBuffer.BufferIterator bufferIterator = currentData.newIterator();
    try {
        while (bufferIterator.advanceNext()) {
            BinaryRowData currentRow = bufferIterator.getRow();
            RowData output = currentRow;
            // NOTE: chaining JoinedRowData wrappers adds per-field indirection — slow.
            for (int i = 0; i < overWindowFrames.length; i++) {
                OverWindowFrame frame = overWindowFrames[i];
                RowData value = frame.process(rowIndex, currentRow);
                output = joinedRows[i].replace(output, value);
            }
            collector.collect(output);
            rowIndex += 1;
        }
    } finally {
        // Previously leaked if an exception escaped the loop above.
        bufferIterator.close();
    }
    currentData.reset();
}
Example usage of org.apache.flink.table.data.utils.JoinedRowData in the Apache Flink project:
the testFullJoin method of the String2SortMergeJoinOperatorTest class.
/** Verifies FULL outer join output: matched rows plus null-padded rows from both sides. */
@Test
public void testFullJoin() throws Exception {
    StreamOperator joinOperator = newOperator(FlinkJoinType.FULL, leftIsSmall);
    TwoInputStreamTaskTestHarness<BinaryRowData, BinaryRowData, JoinedRowData> testHarness =
            buildSortMergeJoin(joinOperator);

    // Expected records, in emission order.
    ConcurrentLinkedQueue<Object> expectedOutput = new ConcurrentLinkedQueue<>();
    for (Object row :
            new Object[] {
                newRow("a", "02"), newRow("b", "14"), newRow("c", "2null"), newRow("d", "0null")
            }) {
        expectedOutput.add(new StreamRecord<>(row));
    }

    testHarness.waitForTaskCompletion();
    TestHarnessUtil.assertOutputEquals(
            "Output was not correct.", expectedOutput, transformToBinary(testHarness.getOutput()));
}
Example usage of org.apache.flink.table.data.utils.JoinedRowData in the Apache Flink project:
the testLeftOuterJoin method of the String2SortMergeJoinOperatorTest class.
/** Verifies LEFT outer join output: matched rows plus null-padded rows from the left side. */
@Test
public void testLeftOuterJoin() throws Exception {
    StreamOperator joinOperator = newOperator(FlinkJoinType.LEFT, leftIsSmall);
    TwoInputStreamTaskTestHarness<BinaryRowData, BinaryRowData, JoinedRowData> testHarness =
            buildSortMergeJoin(joinOperator);

    // Expected records, in emission order.
    ConcurrentLinkedQueue<Object> expectedOutput = new ConcurrentLinkedQueue<>();
    for (Object row : new Object[] {newRow("a", "02"), newRow("b", "14"), newRow("d", "0null")}) {
        expectedOutput.add(new StreamRecord<>(row));
    }

    testHarness.waitForTaskCompletion();
    TestHarnessUtil.assertOutputEquals(
            "Output was not correct.", expectedOutput, transformToBinary(testHarness.getOutput()));
}
Aggregations