Use of org.apache.flink.table.data.utils.JoinedRowData in project flink by apache.
Class NonBufferOverWindowOperator, method open():
@Override
public void open() throws Exception {
    super.open();
    ClassLoader cl = getUserCodeClassloader();
    serializer = (AbstractRowDataSerializer) getOperatorConfig().getTypeSerializerIn1(cl);
    partitionComparator = genComparator.newInstance(cl);
    genComparator = null;

    collector = new StreamRecordCollector<>(output);
    processors = new AggsHandleFunction[aggsHandlers.length];
    joinedRows = new JoinedRowData[aggsHandlers.length];
    for (int i = 0; i < aggsHandlers.length; i++) {
        AggsHandleFunction func = aggsHandlers[i].newInstance(cl);
        func.open(new PerKeyStateDataViewStore(getRuntimeContext()));
        processors[i] = func;
        joinedRows[i] = new JoinedRowData();
    }
    aggsHandlers = null;
}
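For orientation, here is a minimal, self-contained sketch of what the JoinedRowData instances allocated above do: one wrapper presents two RowData instances as a single concatenated row, and replace() lets the operator reuse the same wrapper for every record. The GenericRowData values and field layout below are made up purely for illustration.

import org.apache.flink.table.data.GenericRowData;
import org.apache.flink.table.data.RowData;
import org.apache.flink.table.data.StringData;
import org.apache.flink.table.data.utils.JoinedRowData;

public class JoinedRowDataSketch {
    public static void main(String[] args) {
        // hypothetical input row (id INT, name STRING) and aggregate row (total BIGINT)
        RowData input = GenericRowData.of(42, StringData.fromString("alice"));
        RowData agg = GenericRowData.of(100L);

        // one reusable wrapper, like the joinedRows[] entries created in open()
        JoinedRowData joined = new JoinedRowData();
        joined.replace(input, agg);

        // the left row's fields come first, then the right row's fields
        System.out.println(joined.getArity());   // 3
        System.out.println(joined.getInt(0));    // 42
        System.out.println(joined.getString(1)); // alice
        System.out.println(joined.getLong(2));   // 100
    }
}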
Class NonBufferOverWindowOperator, method processElement():
@Override
public void processElement(StreamRecord<RowData> element) throws Exception {
    RowData input = element.getValue();
    boolean changePartition =
            lastInput == null || partitionComparator.compare(lastInput, input) != 0;

    // calculate the ACC
    RowData output = input;
    for (int i = 0; i < processors.length; i++) {
        AggsHandleFunction processor = processors[i];
        if (changePartition || resetAccumulators[i]) {
            processor.setAccumulators(processor.createAccumulators());
        }

        // TODO Rework AggsHandleFunction.getValue instead of using JoinedRowData. Multilayer
        // JoinedRowData is slow.
        processor.accumulate(input);
        RowData value = processor.getValue();
        output = joinedRows[i].replace(output, value);
    }
    collector.collect(output);

    if (changePartition) {
        lastInput = serializer.copy(input);
    }
}
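Each pass through the loop above wraps the previous output in the next JoinedRowData, so the emitted row for N aggregate handlers is N wrappers deep; that layering is what the TODO refers to. A rough, hypothetical illustration of the nesting:

// hypothetical: two aggregate values appended to a three-field input row
RowData input = GenericRowData.of(1, 2, 3);
RowData agg1 = GenericRowData.of(10L); // value from the first handler
RowData agg2 = GenericRowData.of(20L); // value from the second handler

JoinedRowData[] joinedRows = {new JoinedRowData(), new JoinedRowData()};

RowData output = input;
output = joinedRows[0].replace(output, agg1); // fields (1, 2, 3, 10)
output = joinedRows[1].replace(output, agg2); // fields (1, 2, 3, 10, 20), two layers deep

// every field access on the outer wrapper is delegated through the inner one,
// which is why deep nesting is slower than a flat row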
Class BufferDataOverWindowOperator, method open():
@Override
public void open() throws Exception {
    super.open();
    ClassLoader cl = getUserCodeClassloader();
    serializer = (AbstractRowDataSerializer) getOperatorConfig().getTypeSerializerIn1(cl);
    partitionComparator = genComparator.newInstance(cl);
    genComparator = null;

    MemoryManager memManager = getContainingTask().getEnvironment().getMemoryManager();
    LazyMemorySegmentPool pool =
            new LazyMemorySegmentPool(
                    this, memManager, (int) (computeMemorySize() / memManager.getPageSize()));
    this.currentData =
            new ResettableExternalBuffer(
                    getContainingTask().getEnvironment().getIOManager(),
                    pool,
                    serializer,
                    isRowAllInFixedPart);

    collector = new StreamRecordCollector<>(output);
    joinedRows = new JoinedRowData[overWindowFrames.length];
    for (int i = 0; i < overWindowFrames.length; i++) {
        overWindowFrames[i].open(new ExecutionContextImpl(this, getRuntimeContext()));
        joinedRows[i] = new JoinedRowData();
    }
}
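The pool above is sized in memory pages: the operator's managed memory from computeMemorySize() divided by the memory manager's page size. A worked example with made-up numbers, assuming Flink's default 32 KB memory segment size:

// hypothetical numbers, only to illustrate the page-count expression above
long managedMemory = 32L * 1024 * 1024;          // 32 MB granted to this operator
long pageSize = 32L * 1024;                      // 32 KB memory segments
int numPages = (int) (managedMemory / pageSize); // pool of 1024 lazily allocated segments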
Class ProcTimeRangeBoundedPrecedingFunction, method open():
@Override
public void open(Configuration parameters) throws Exception {
    function = genAggsHandler.newInstance(getRuntimeContext().getUserCodeClassLoader());
    function.open(new PerKeyStateDataViewStore(getRuntimeContext()));

    output = new JoinedRowData();

    // input elements are all binary rows, as they come from the network
    InternalTypeInfo<RowData> inputType = InternalTypeInfo.ofFields(inputFieldTypes);
    // we keep the elements received in a map state indexed by their ingestion time
    ListTypeInfo<RowData> rowListTypeInfo = new ListTypeInfo<>(inputType);
    MapStateDescriptor<Long, List<RowData>> mapStateDescriptor =
            new MapStateDescriptor<>(
                    "inputState", BasicTypeInfo.LONG_TYPE_INFO, rowListTypeInfo);
    inputState = getRuntimeContext().getMapState(mapStateDescriptor);

    InternalTypeInfo<RowData> accTypeInfo = InternalTypeInfo.ofFields(accTypes);
    ValueStateDescriptor<RowData> stateDescriptor =
            new ValueStateDescriptor<RowData>("accState", accTypeInfo);
    accState = getRuntimeContext().getState(stateDescriptor);

    ValueStateDescriptor<Long> cleanupTsStateDescriptor =
            new ValueStateDescriptor<>("cleanupTsState", Types.LONG);
    this.cleanupTsState = getRuntimeContext().getState(cleanupTsStateDescriptor);
}
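The inputState registered here buffers rows per processing-time instant; a sketch (not the verbatim Flink code) of how a processElement-style method would typically append to it, assuming a KeyedProcessFunction context ctx and the current input row input:

// sketch: group the incoming row under the current processing time
long currentTime = ctx.timerService().currentProcessingTime();
List<RowData> rowList = inputState.get(currentTime);
if (rowList == null) {
    rowList = new ArrayList<>();
    // fire a timer once this processing-time slot has passed
    ctx.timerService().registerProcessingTimeTimer(currentTime + 1);
}
rowList.add(input);
inputState.put(currentTime, rowList);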
Class ProcTimeUnboundedPrecedingFunction, method open():
@Override
public void open(Configuration parameters) throws Exception {
    function = genAggsHandler.newInstance(getRuntimeContext().getUserCodeClassLoader());
    function.open(new PerKeyStateDataViewStore(getRuntimeContext()));

    output = new JoinedRowData();

    InternalTypeInfo<RowData> accTypeInfo = InternalTypeInfo.ofFields(accTypes);
    ValueStateDescriptor<RowData> stateDescriptor =
            new ValueStateDescriptor<RowData>("accState", accTypeInfo);
    accState = getRuntimeContext().getState(stateDescriptor);

    initCleanupTimeState("ProcTimeUnboundedOverCleanupTime");
}
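The output JoinedRowData created here is the reusable wrapper the function fills before emitting each result. Roughly, in a processElement-style body (a sketch under assumed field names, not the verbatim Flink code):

// sketch: accumulate the input, then join the input fields with the aggregate values
RowData accumulators = accState.value();
if (accumulators == null) {
    accumulators = function.createAccumulators();
}
function.setAccumulators(accumulators);
function.accumulate(input);
accState.update(function.getAccumulators());

// prepend the input fields, append the aggregate values, emit
output.replace(input, function.getValue());
out.collect(output);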