use of org.apache.flink.table.runtime.dataview.PerKeyStateDataViewStore in project flink by apache.
the class InsensitiveOverFrame method open.
@Override
public void open(ExecutionContext ctx) throws Exception {
    // instantiate the generated aggregation handler with the user-code classloader
    processor = aggsHandleFunction.newInstance(ctx.getRuntimeContext().getUserCodeClassLoader());
    // DataViews inside the accumulators are backed by per-key state
    processor.open(new PerKeyStateDataViewStore(ctx.getRuntimeContext()));
    // drop the reference to the generated code once the instance is created
    this.aggsHandleFunction = null;
}
use of org.apache.flink.table.runtime.dataview.PerKeyStateDataViewStore in project flink by apache.
the class SlidingOverFrame method open.
@Override
public void open(ExecutionContext ctx) throws Exception {
    this.inputSer = new RowDataSerializer(inputType);
    this.valueSer = new RowDataSerializer(valueType);
    ClassLoader cl = ctx.getRuntimeContext().getUserCodeClassLoader();
    processor = aggsHandleFunction.newInstance(cl);
    processor.open(new PerKeyStateDataViewStore(ctx.getRuntimeContext()));
    buffer = new ArrayDeque<>();
    this.aggsHandleFunction = null;
}
use of org.apache.flink.table.runtime.dataview.PerKeyStateDataViewStore in project flink by apache.
the class UnboundedOverWindowFrame method open.
@Override
public void open(ExecutionContext ctx) throws Exception {
    ClassLoader cl = ctx.getRuntimeContext().getUserCodeClassLoader();
    processor = aggsHandleFunction.newInstance(cl);
    processor.open(new PerKeyStateDataViewStore(ctx.getRuntimeContext()));
    this.aggsHandleFunction = null;
    this.valueSer = new RowDataSerializer(valueType.getChildren().toArray(new LogicalType[0]));
}
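The frame above builds its value serializer directly from the field logical types of valueType. Below is a minimal standalone sketch of that RowDataSerializer construction; the class name RowDataSerializerSketch and the example field types are illustrative assumptions, not part of the Flink sources.

import org.apache.flink.table.data.GenericRowData;
import org.apache.flink.table.data.RowData;
import org.apache.flink.table.data.StringData;
import org.apache.flink.table.runtime.typeutils.RowDataSerializer;
import org.apache.flink.table.types.logical.BigIntType;
import org.apache.flink.table.types.logical.VarCharType;

public class RowDataSerializerSketch {
    public static void main(String[] args) {
        // build a serializer from the field logical types, mirroring how the frame
        // flattens valueType.getChildren() into a LogicalType[]
        RowDataSerializer valueSer =
                new RowDataSerializer(new BigIntType(), new VarCharType(VarCharType.MAX_LENGTH));

        RowData row = GenericRowData.of(42L, StringData.fromString("hello"));
        // copy() goes through the serializer, exercising its handling of the row fields
        RowData copy = valueSer.copy(row);
        System.out.println(copy.getLong(0) + ", " + copy.getString(1));
    }
}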
use of org.apache.flink.table.runtime.dataview.PerKeyStateDataViewStore in project flink by apache.
the class NonBufferOverWindowOperator method open.
@Override
public void open() throws Exception {
    super.open();
    ClassLoader cl = getUserCodeClassloader();
    serializer = (AbstractRowDataSerializer) getOperatorConfig().getTypeSerializerIn1(cl);
    partitionComparator = genComparator.newInstance(cl);
    genComparator = null;
    collector = new StreamRecordCollector<>(output);

    // one aggregation handler and one reusable output row per over-window aggregate
    processors = new AggsHandleFunction[aggsHandlers.length];
    joinedRows = new JoinedRowData[aggsHandlers.length];
    for (int i = 0; i < aggsHandlers.length; i++) {
        AggsHandleFunction func = aggsHandlers[i].newInstance(cl);
        func.open(new PerKeyStateDataViewStore(getRuntimeContext()));
        processors[i] = func;
        joinedRows[i] = new JoinedRowData();
    }
    aggsHandlers = null;
}
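The operator above keeps one reusable JoinedRowData per aggregation handler and swaps its backing rows for each record rather than copying them. A small hedged sketch of that reuse pattern follows; the class name is ours, and the JoinedRowData import path has moved between Flink releases (the utils package location is assumed here).

import org.apache.flink.table.data.GenericRowData;
import org.apache.flink.table.data.StringData;
import org.apache.flink.table.data.utils.JoinedRowData;

public class JoinedRowDataSketch {
    public static void main(String[] args) {
        // replace() swaps the backing rows in place instead of materializing
        // a concatenated row, so one instance can be reused per handler
        JoinedRowData joined = new JoinedRowData();
        joined.replace(
                GenericRowData.of(StringData.fromString("key")),
                GenericRowData.of(100L));
        // field 0 is served by the left row, field 1 by the right row
        System.out.println(joined.getString(0) + ", " + joined.getLong(1));
    }
}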
use of org.apache.flink.table.runtime.dataview.PerKeyStateDataViewStore in project flink by apache.
the class ProcTimeRangeBoundedPrecedingFunction method open.
@Override
public void open(Configuration parameters) throws Exception {
    function = genAggsHandler.newInstance(getRuntimeContext().getUserCodeClassLoader());
    function.open(new PerKeyStateDataViewStore(getRuntimeContext()));

    output = new JoinedRowData();

    // input elements are all binary rows, as they come from the network
    InternalTypeInfo<RowData> inputType = InternalTypeInfo.ofFields(inputFieldTypes);
    // we keep the elements received in a map state indexed by their ingestion time
    ListTypeInfo<RowData> rowListTypeInfo = new ListTypeInfo<>(inputType);
    MapStateDescriptor<Long, List<RowData>> mapStateDescriptor =
            new MapStateDescriptor<>("inputState", BasicTypeInfo.LONG_TYPE_INFO, rowListTypeInfo);
    inputState = getRuntimeContext().getMapState(mapStateDescriptor);

    InternalTypeInfo<RowData> accTypeInfo = InternalTypeInfo.ofFields(accTypes);
    ValueStateDescriptor<RowData> stateDescriptor =
            new ValueStateDescriptor<>("accState", accTypeInfo);
    accState = getRuntimeContext().getState(stateDescriptor);

    ValueStateDescriptor<Long> cleanupTsStateDescriptor =
            new ValueStateDescriptor<>("cleanupTsState", Types.LONG);
    this.cleanupTsState = getRuntimeContext().getState(cleanupTsStateDescriptor);
}
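The same keyed-state wiring can be shown outside the generated-code path. Below is a minimal hedged sketch of a hypothetical KeyedProcessFunction that registers an equivalent MapState/ValueState pair in open(); the class and field names are illustrative assumptions and not Flink code.

import java.util.ArrayList;
import java.util.List;

import org.apache.flink.api.common.state.MapState;
import org.apache.flink.api.common.state.MapStateDescriptor;
import org.apache.flink.api.common.state.ValueState;
import org.apache.flink.api.common.state.ValueStateDescriptor;
import org.apache.flink.api.common.typeinfo.BasicTypeInfo;
import org.apache.flink.api.common.typeinfo.Types;
import org.apache.flink.api.java.typeutils.ListTypeInfo;
import org.apache.flink.configuration.Configuration;
import org.apache.flink.streaming.api.functions.KeyedProcessFunction;
import org.apache.flink.util.Collector;

public class BufferedSumFunction extends KeyedProcessFunction<String, Long, Long> {

    private transient MapState<Long, List<Long>> inputState;
    private transient ValueState<Long> accState;

    @Override
    public void open(Configuration parameters) throws Exception {
        // buffer input elements per timestamp, as the function above does for RowData
        ListTypeInfo<Long> bucketTypeInfo = new ListTypeInfo<>(Types.LONG);
        inputState = getRuntimeContext().getMapState(
                new MapStateDescriptor<>("inputState", BasicTypeInfo.LONG_TYPE_INFO, bucketTypeInfo));
        // single accumulator value per key
        accState = getRuntimeContext().getState(
                new ValueStateDescriptor<>("accState", Types.LONG));
    }

    @Override
    public void processElement(Long value, Context ctx, Collector<Long> out) throws Exception {
        // bucket the element under the current processing time
        long ts = ctx.timerService().currentProcessingTime();
        List<Long> bucket = inputState.get(ts);
        if (bucket == null) {
            bucket = new ArrayList<>();
        }
        bucket.add(value);
        inputState.put(ts, bucket);

        // maintain a per-key running sum in the value state
        Long acc = accState.value();
        accState.update((acc == null ? 0L : acc) + value);
        out.collect(accState.value());
    }
}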