Use of org.apache.flink.table.runtime.typeutils.RowDataSerializer in project flink by apache.
The class StreamExecWindowAggregate, method translateToPlanInternal.
@SuppressWarnings("unchecked")
@Override
protected Transformation<RowData> translateToPlanInternal(PlannerBase planner, ExecNodeConfig config) {
final ExecEdge inputEdge = getInputEdges().get(0);
final Transformation<RowData> inputTransform = (Transformation<RowData>) inputEdge.translateToPlan(planner);
final RowType inputRowType = (RowType) inputEdge.getOutputType();
final ZoneId shiftTimeZone = TimeWindowUtil.getShiftTimeZone(windowing.getTimeAttributeType(), config.getLocalTimeZone());
final SliceAssigner sliceAssigner = createSliceAssigner(windowing, shiftTimeZone);
// Hopping windows require an additional COUNT(*) so the operator can tell, from whether the
// fired window is empty, whether to register the next timer; see SliceSharedWindowAggProcessor.
final AggregateInfoList aggInfoList = AggregateUtil.deriveStreamWindowAggregateInfoList(
        inputRowType, JavaScalaConversionUtil.toScala(Arrays.asList(aggCalls)),
        windowing.getWindow(), true /* isStateBackendDataViews */);
final GeneratedNamespaceAggsHandleFunction<Long> generatedAggsHandler = createAggsHandler(
        sliceAssigner, aggInfoList, config, planner.getRelBuilder(),
        inputRowType.getChildren(), shiftTimeZone);
final RowDataKeySelector selector = KeySelectorUtil.getRowDataSelector(grouping, InternalTypeInfo.of(inputRowType));
final LogicalType[] accTypes = convertToLogicalTypes(aggInfoList.getAccTypes());
final OneInputStreamOperator<RowData, RowData> windowOperator = SlicingWindowAggOperatorBuilder.builder()
        .inputSerializer(new RowDataSerializer(inputRowType))
        .shiftTimeZone(shiftTimeZone)
        .keySerializer((PagedTypeSerializer<RowData>) selector.getProducedType().toSerializer())
        .assigner(sliceAssigner)
        .countStarIndex(aggInfoList.getIndexOfCountStar())
        .aggregate(generatedAggsHandler, new RowDataSerializer(accTypes))
        .build();
final OneInputTransformation<RowData, RowData> transform = ExecNodeUtil.createOneInputTransformation(
        inputTransform, createTransformationMeta(WINDOW_AGGREGATE_TRANSFORMATION, config),
        SimpleOperatorFactory.of(windowOperator), InternalTypeInfo.of(getOutputType()),
        inputTransform.getParallelism(), WINDOW_AGG_MEMORY_RATIO);
// set KeyType and Selector for state
transform.setStateKeySelector(selector);
transform.setStateKeyType(selector.getProducedType());
return transform;
}
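To make the role of new RowDataSerializer(inputRowType) concrete, here is a minimal standalone sketch; the class name, the assumed (INT, STRING) row type, and the sample values are illustrative and not taken from the planner code above.

import org.apache.flink.table.data.GenericRowData;
import org.apache.flink.table.data.RowData;
import org.apache.flink.table.data.StringData;
import org.apache.flink.table.runtime.typeutils.RowDataSerializer;
import org.apache.flink.table.types.logical.IntType;
import org.apache.flink.table.types.logical.RowType;
import org.apache.flink.table.types.logical.VarCharType;

public class RowDataSerializerSketch {
    public static void main(String[] args) {
        // Assumed input row type for the sketch: (INT, STRING).
        RowType inputRowType = RowType.of(new IntType(), new VarCharType(VarCharType.MAX_LENGTH));

        // Same constructor as inputSerializer(new RowDataSerializer(inputRowType)) above.
        RowDataSerializer serializer = new RowDataSerializer(inputRowType);

        RowData row = GenericRowData.of(42, StringData.fromString("hello"));

        // copy() gives a defensive copy; toBinaryRow() converts the row to the compact
        // binary format used by the runtime.
        RowData copied = serializer.copy(row);
        System.out.println(serializer.toBinaryRow(copied).getArity()); // prints 2
    }
}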
Use of org.apache.flink.table.runtime.typeutils.RowDataSerializer in project flink by apache.
The class StreamExecGlobalWindowAggregate, method translateToPlanInternal.
@SuppressWarnings("unchecked")
@Override
protected Transformation<RowData> translateToPlanInternal(PlannerBase planner, ExecNodeConfig config) {
final ExecEdge inputEdge = getInputEdges().get(0);
final Transformation<RowData> inputTransform = (Transformation<RowData>) inputEdge.translateToPlan(planner);
final RowType inputRowType = (RowType) inputEdge.getOutputType();
final ZoneId shiftTimeZone = TimeWindowUtil.getShiftTimeZone(windowing.getTimeAttributeType(), config.getLocalTimeZone());
final SliceAssigner sliceAssigner = createSliceAssigner(windowing, shiftTimeZone);
final AggregateInfoList localAggInfoList = AggregateUtil.deriveStreamWindowAggregateInfoList(
        localAggInputRowType, // should use original input here
        JavaScalaConversionUtil.toScala(Arrays.asList(aggCalls)),
        windowing.getWindow(), false /* isStateBackendDataViews */);
final AggregateInfoList globalAggInfoList = AggregateUtil.deriveStreamWindowAggregateInfoList(
        localAggInputRowType, // should use original input here
        JavaScalaConversionUtil.toScala(Arrays.asList(aggCalls)),
        windowing.getWindow(), true /* isStateBackendDataViews */);
// handler used to merge multiple local accumulators into one accumulator,
// where the accumulators are all held in memory
final GeneratedNamespaceAggsHandleFunction<Long> localAggsHandler = createAggsHandler(
        "LocalWindowAggsHandler", sliceAssigner, localAggInfoList, grouping.length, true,
        localAggInfoList.getAccTypes(), config, planner.getRelBuilder(), shiftTimeZone);
// handler used to merge the single local accumulator (held in memory) into the state accumulator
final GeneratedNamespaceAggsHandleFunction<Long> globalAggsHandler = createAggsHandler(
        "GlobalWindowAggsHandler", sliceAssigner, globalAggInfoList, 0, true,
        localAggInfoList.getAccTypes(), config, planner.getRelBuilder(), shiftTimeZone);
// handler used to merge state accumulators when merging slices into a window,
// e.g. for Hop and Cumulate windows
final GeneratedNamespaceAggsHandleFunction<Long> stateAggsHandler = createAggsHandler(
        "StateWindowAggsHandler", sliceAssigner, globalAggInfoList, 0, false,
        globalAggInfoList.getAccTypes(), config, planner.getRelBuilder(), shiftTimeZone);
final RowDataKeySelector selector = KeySelectorUtil.getRowDataSelector(grouping, InternalTypeInfo.of(inputRowType));
final LogicalType[] accTypes = convertToLogicalTypes(globalAggInfoList.getAccTypes());
final OneInputStreamOperator<RowData, RowData> windowOperator = SlicingWindowAggOperatorBuilder.builder()
        .inputSerializer(new RowDataSerializer(inputRowType))
        .shiftTimeZone(shiftTimeZone)
        .keySerializer((PagedTypeSerializer<RowData>) selector.getProducedType().toSerializer())
        .assigner(sliceAssigner)
        .countStarIndex(globalAggInfoList.getIndexOfCountStar())
        .globalAggregate(localAggsHandler, globalAggsHandler, stateAggsHandler, new RowDataSerializer(accTypes))
        .build();
final OneInputTransformation<RowData, RowData> transform = ExecNodeUtil.createOneInputTransformation(
        inputTransform, createTransformationMeta(GLOBAL_WINDOW_AGGREGATE_TRANSFORMATION, config),
        SimpleOperatorFactory.of(windowOperator), InternalTypeInfo.of(getOutputType()),
        inputTransform.getParallelism(), WINDOW_AGG_MEMORY_RATIO);
// set KeyType and Selector for state
transform.setStateKeySelector(selector);
transform.setStateKeyType(selector.getProducedType());
return transform;
}
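The accumulator serializer above uses the varargs constructor new RowDataSerializer(accTypes). A small sketch of that constructor follows, assuming a hypothetical two-BIGINT accumulator layout; it round-trips an accumulator row through Flink's serialization views much as a state backend would.

import org.apache.flink.core.memory.DataInputDeserializer;
import org.apache.flink.core.memory.DataOutputSerializer;
import org.apache.flink.table.data.GenericRowData;
import org.apache.flink.table.data.RowData;
import org.apache.flink.table.runtime.typeutils.RowDataSerializer;
import org.apache.flink.table.types.logical.BigIntType;
import org.apache.flink.table.types.logical.LogicalType;

public class AccumulatorSerializerSketch {
    public static void main(String[] args) throws Exception {
        // Hypothetical accumulator layout: SUM (BIGINT) and COUNT(*) (BIGINT).
        LogicalType[] accTypes = {new BigIntType(), new BigIntType()};

        // Same varargs constructor as new RowDataSerializer(accTypes) above.
        RowDataSerializer accSerializer = new RowDataSerializer(accTypes);

        RowData acc = GenericRowData.of(100L, 7L);

        // Round-trip the accumulator row through Flink's binary data views.
        DataOutputSerializer out = new DataOutputSerializer(64);
        accSerializer.serialize(acc, out);
        RowData restored = accSerializer.deserialize(new DataInputDeserializer(out.getCopyOfBuffer()));
        System.out.println(restored.getLong(0) + ", " + restored.getLong(1)); // prints 100, 7
    }
}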
Use of org.apache.flink.table.runtime.typeutils.RowDataSerializer in project flink by apache.
The class UnboundedFollowingOverFrame, method open.
@Override
public void open(ExecutionContext ctx) throws Exception {
ClassLoader cl = ctx.getRuntimeContext().getUserCodeClassLoader();
processor = aggsHandleFunction.newInstance(cl);
processor.open(new PerKeyStateDataViewStore(ctx.getRuntimeContext()));
this.aggsHandleFunction = null;
this.valueSer = new RowDataSerializer(valueType);
}
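Runtime code typically builds its serializers in open(), since serializer instances may be stateful and are not shared across tasks. The following sketch assumes a hypothetical (INT, DOUBLE) value row type and illustrates the usual companions of that pattern: copy() for buffering rows and duplicate() for obtaining an independent serializer instance.

import org.apache.flink.table.data.GenericRowData;
import org.apache.flink.table.data.RowData;
import org.apache.flink.table.runtime.typeutils.RowDataSerializer;
import org.apache.flink.table.types.logical.DoubleType;
import org.apache.flink.table.types.logical.IntType;
import org.apache.flink.table.types.logical.RowType;

public class OverFrameSerializerSketch {
    public static void main(String[] args) {
        // Hypothetical value row type of the frame: (INT, DOUBLE).
        RowType valueType = RowType.of(new IntType(), new DoubleType());

        // Mirrors this.valueSer = new RowDataSerializer(valueType) in open().
        RowDataSerializer valueSer = new RowDataSerializer(valueType);

        // Input row objects may be reused by the runtime, so code that needs to
        // hold on to a row copies it first.
        RowData incoming = GenericRowData.of(1, 2.5d);
        RowData buffered = valueSer.copy(incoming);

        // duplicate() returns an independent serializer instance for another task/thread.
        RowDataSerializer another = (RowDataSerializer) valueSer.duplicate();
        System.out.println(another.getArity() + " fields, buffered arity " + buffered.getArity());
    }
}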
Use of org.apache.flink.table.runtime.typeutils.RowDataSerializer in project flink by apache.
The class WindowOperator, method open.
@Override
public void open() throws Exception {
super.open();
collector = new TimestampedCollector<>(output);
collector.eraseTimestamp();
internalTimerService = getInternalTimerService("window-timers", windowSerializer, this);
triggerContext = new TriggerContext();
triggerContext.open();
StateDescriptor<ValueState<RowData>, RowData> windowStateDescriptor = new ValueStateDescriptor<>("window-aggs", new RowDataSerializer(accumulatorTypes));
this.windowState = (InternalValueState<K, W, RowData>) getOrCreateKeyedState(windowSerializer, windowStateDescriptor);
if (produceUpdates) {
LogicalType[] valueTypes = ArrayUtils.addAll(aggResultTypes, windowPropertyTypes);
StateDescriptor<ValueState<RowData>, RowData> previousStateDescriptor = new ValueStateDescriptor<>("previous-aggs", new RowDataSerializer(valueTypes));
this.previousState = (InternalValueState<K, W, RowData>) getOrCreateKeyedState(windowSerializer, previousStateDescriptor);
}
compileGeneratedCode();
WindowContext windowContext = new WindowContext();
windowAggregator.open(new PerWindowStateDataViewStore(getKeyedStateBackend(), windowSerializer, getRuntimeContext()));
if (windowAssigner instanceof MergingWindowAssigner) {
this.windowFunction = new MergingWindowProcessFunction<>((MergingWindowAssigner<W>) windowAssigner, windowAggregator, windowSerializer, allowedLateness);
} else if (windowAssigner instanceof PanedWindowAssigner) {
this.windowFunction = new PanedWindowProcessFunction<>((PanedWindowAssigner<W>) windowAssigner, windowAggregator, allowedLateness);
} else {
this.windowFunction = new GeneralWindowProcessFunction<>(windowAssigner, windowAggregator, allowedLateness);
}
windowFunction.open(windowContext);
// metrics
this.numLateRecordsDropped = metrics.counter(LATE_ELEMENTS_DROPPED_METRIC_NAME);
this.lateRecordsDroppedRate = metrics.meter(LATE_ELEMENTS_DROPPED_RATE_METRIC_NAME, new MeterView(numLateRecordsDropped));
this.watermarkLatency = metrics.gauge(WATERMARK_LATENCY_METRIC_NAME, () -> {
long watermark = internalTimerService.currentWatermark();
if (watermark < 0) {
return 0L;
} else {
return internalTimerService.currentProcessingTime() - watermark;
}
});
}
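Both window states above are ValueStates whose value is a row, so their descriptors are built directly from a RowDataSerializer over the accumulator (or result) types. A minimal sketch of that descriptor construction, assuming a hypothetical two-BIGINT accumulator layout:

import org.apache.flink.api.common.state.ValueStateDescriptor;
import org.apache.flink.table.data.RowData;
import org.apache.flink.table.runtime.typeutils.RowDataSerializer;
import org.apache.flink.table.types.logical.BigIntType;
import org.apache.flink.table.types.logical.LogicalType;

public class WindowStateDescriptorSketch {
    public static void main(String[] args) {
        // Hypothetical accumulator layout: COUNT (BIGINT) and SUM (BIGINT).
        LogicalType[] accumulatorTypes = {new BigIntType(), new BigIntType()};

        // Same pattern as the "window-aggs" descriptor in open(): the state value is an
        // accumulator row, serialized with a RowDataSerializer over the accumulator types.
        ValueStateDescriptor<RowData> windowStateDescriptor =
                new ValueStateDescriptor<>("window-aggs", new RowDataSerializer(accumulatorTypes));

        System.out.println(windowStateDescriptor.getName()); // prints window-aggs
    }
}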
Use of org.apache.flink.table.runtime.typeutils.RowDataSerializer in project flink by apache.
The class RowDataTest, method getBinaryRow.
private BinaryRowData getBinaryRow() {
BinaryRowData row = new BinaryRowData(18);
BinaryRowWriter writer = new BinaryRowWriter(row);
writer.writeBoolean(0, true);
writer.writeByte(1, (byte) 1);
writer.writeShort(2, (short) 2);
writer.writeInt(3, 3);
writer.writeLong(4, 4);
writer.writeFloat(5, 5);
writer.writeDouble(6, 6);
writer.writeString(8, str);
writer.writeRawValue(9, generic, genericSerializer);
writer.writeDecimal(10, decimal1, 5);
writer.writeDecimal(11, decimal2, 20);
writer.writeArray(12, array, new ArrayDataSerializer(DataTypes.INT().getLogicalType()));
writer.writeMap(13, map, new MapDataSerializer(DataTypes.INT().getLogicalType(), DataTypes.INT().getLogicalType()));
writer.writeRow(14, underRow, new RowDataSerializer(RowType.of(new IntType(), new IntType())));
writer.writeBinary(15, bytes);
writer.writeTimestamp(16, timestamp1, 3);
writer.writeTimestamp(17, timestamp2, 9);
return row;
}
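Field 14 is where RowDataSerializer appears in this test: BinaryRowWriter uses it to encode a nested row. The following standalone sketch (class name and values are illustrative) writes a one-field row containing a nested (INT, INT) row and reads it back with getRow().

import org.apache.flink.table.data.GenericRowData;
import org.apache.flink.table.data.RowData;
import org.apache.flink.table.data.binary.BinaryRowData;
import org.apache.flink.table.data.writer.BinaryRowWriter;
import org.apache.flink.table.runtime.typeutils.RowDataSerializer;
import org.apache.flink.table.types.logical.IntType;
import org.apache.flink.table.types.logical.RowType;

public class NestedRowSketch {
    public static void main(String[] args) {
        // A one-field outer row whose only field is a nested (INT, INT) row.
        BinaryRowData row = new BinaryRowData(1);
        BinaryRowWriter writer = new BinaryRowWriter(row);

        // The RowDataSerializer drives the binary encoding of the nested row,
        // just like field 14 in getBinaryRow() above.
        RowData nested = GenericRowData.of(1, 2);
        writer.writeRow(0, nested, new RowDataSerializer(RowType.of(new IntType(), new IntType())));
        writer.complete();

        // Read the nested row back; the caller supplies its arity.
        RowData readBack = row.getRow(0, 2);
        System.out.println(readBack.getInt(0) + ", " + readBack.getInt(1)); // prints 1, 2
    }
}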