Use of org.apache.flink.table.data.utils.JoinedRowData in project flink by apache.
The class WindowRankProcessor, method open.
@Override
public void open(Context<Long> context) throws Exception {
    this.ctx = context;
    // compile comparator
    sortKeyComparator = generatedSortKeyComparator.newInstance(
            ctx.getRuntimeContext().getUserCodeClassLoader());
    final LongSerializer namespaceSerializer = LongSerializer.INSTANCE;
    ListSerializer<RowData> listSerializer = new ListSerializer<>(inputSerializer);
    MapStateDescriptor<RowData, List<RowData>> mapStateDescriptor =
            new MapStateDescriptor<>("window_rank", sortKeySerializer, listSerializer);
    MapState<RowData, List<RowData>> state =
            ctx.getKeyedStateBackend().getOrCreateKeyedState(namespaceSerializer, mapStateDescriptor);
    this.windowTimerService = new WindowTimerServiceImpl(ctx.getTimerService(), shiftTimeZone);
    this.windowState =
            new WindowMapState<>((InternalMapState<RowData, Long, RowData, List<RowData>>) state);
    this.windowBuffer = bufferFactory.create(
            ctx.getOperatorOwner(), ctx.getMemoryManager(), ctx.getMemorySize(),
            ctx.getRuntimeContext(), windowTimerService, ctx.getKeyedStateBackend(),
            windowState, true, shiftTimeZone);
    this.reuseOutput = new JoinedRowData();
    this.reuseRankRow = new GenericRowData(1);
    this.currentProgress = Long.MIN_VALUE;
}
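The reuseOutput field created in open() follows the usual reuse pattern for JoinedRowData: a single instance is allocated once and then re-pointed at new backing rows for each emitted record via replace(). Below is a minimal, self-contained sketch of that pattern; it is not taken from the Flink sources, and the class name and values are made up for illustration.

import org.apache.flink.table.data.GenericRowData;
import org.apache.flink.table.data.RowData;
import org.apache.flink.table.data.StringData;
import org.apache.flink.table.data.utils.JoinedRowData;

public class JoinedRowDataReuseSketch {
    public static void main(String[] args) {
        RowData key = GenericRowData.of(StringData.fromString("user-1"));
        RowData rank = GenericRowData.of(3L);

        // One reusable instance; replace() swaps the two backing rows without copying fields.
        JoinedRowData reuse = new JoinedRowData();
        RowData out = reuse.replace(key, rank);

        // Field indices of the second row are offset by the arity of the first row.
        System.out.println(out.getArity());   // 2
        System.out.println(out.getString(0)); // user-1
        System.out.println(out.getLong(1));   // 3
    }
}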
Use of org.apache.flink.table.data.utils.JoinedRowData in project flink by apache.
The class InputConversionOperator, method processElement.
@Override
public void processElement(StreamRecord<E> element) throws Exception {
    final E externalRecord = element.getValue();
    final Object internalRecord;
    try {
        internalRecord = converter.toInternal(externalRecord);
    } catch (Exception e) {
        throw new FlinkRuntimeException(
                String.format(
                        "Error during input conversion from external DataStream API to "
                                + "internal Table API data structures. Make sure that the "
                                + "provided data types that configure the converters are "
                                + "correctly declared in the schema. Affected record:\n%s",
                        externalRecord),
                e);
    }
    final RowData payloadRowData;
    if (requiresWrapping) {
        final GenericRowData wrapped = new GenericRowData(RowKind.INSERT, 1);
        wrapped.setField(0, internalRecord);
        payloadRowData = wrapped;
    } else {
        // null top-level records are not supported and are skipped
        if (internalRecord == null) {
            return;
        }
        payloadRowData = (RowData) internalRecord;
    }
    final RowKind kind = payloadRowData.getRowKind();
    if (isInsertOnly && kind != RowKind.INSERT) {
        throw new FlinkRuntimeException(
                String.format(
                        "Error during input conversion. Conversion expects insert-only "
                                + "records but DataStream API record contains: %s",
                        kind));
    }
    if (!produceRowtimeMetadata) {
        output.collect(outRecord.replace(payloadRowData));
        return;
    }
    if (!element.hasTimestamp()) {
        throw new FlinkRuntimeException(
                "Could not find timestamp in DataStream API record. "
                        + "Make sure that timestamps have been assigned before and "
                        + "the event-time characteristic is enabled.");
    }
    final GenericRowData rowtimeRowData = new GenericRowData(1);
    rowtimeRowData.setField(0, TimestampData.fromEpochMillis(element.getTimestamp()));
    final JoinedRowData joinedRowData = new JoinedRowData(kind, payloadRowData, rowtimeRowData);
    output.collect(outRecord.replace(joinedRowData));
}
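When produceRowtimeMetadata is enabled, the operator appends a single rowtime column behind the payload row and carries the payload's RowKind over via the three-argument constructor. A hedged sketch of just that wrapping step follows; the class name and field values are illustrative only, not the operator's actual data.

import org.apache.flink.table.data.GenericRowData;
import org.apache.flink.table.data.RowData;
import org.apache.flink.table.data.TimestampData;
import org.apache.flink.table.data.utils.JoinedRowData;
import org.apache.flink.types.RowKind;

public class RowtimeAppendSketch {
    public static void main(String[] args) {
        // Hypothetical two-field payload; in the operator this comes from the converter.
        RowData payload = GenericRowData.of(1L, 2L);

        // Single rowtime column, filled from the StreamRecord timestamp in the operator.
        GenericRowData rowtime = new GenericRowData(1);
        rowtime.setField(0, TimestampData.fromEpochMillis(1_700_000_000_000L));

        // The payload's RowKind is propagated to the joined view.
        RowData joined = new JoinedRowData(RowKind.INSERT, payload, rowtime);

        System.out.println(joined.getRowKind());       // INSERT
        System.out.println(joined.getTimestamp(2, 3)); // the appended rowtime column
    }
}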
Use of org.apache.flink.table.data.utils.JoinedRowData in project flink by apache.
The class AggregateWindowOperator, method open.
@Override
public void open() throws Exception {
    super.open();
    reuseOutput = new JoinedRowData();
}
Use of org.apache.flink.table.data.utils.JoinedRowData in project flink by apache.
The class WindowTableFunctionOperator, method open.
@Override
public void open() throws Exception {
    super.open();
    this.collector = new TimestampedCollector<>(output);
    collector.eraseTimestamp();
    outRow = new JoinedRowData();
    windowProperties = new GenericRowData(3);
}
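The three-field windowProperties row is presumably filled with the window metadata columns (window_start, window_end, window_time) and then joined behind each input record through outRow. A hedged sketch of that join is shown below; the field layout, the "end minus one millisecond" window time, and all values are assumptions for illustration, not the operator's exact code.

import java.time.Duration;

import org.apache.flink.table.data.GenericRowData;
import org.apache.flink.table.data.RowData;
import org.apache.flink.table.data.TimestampData;
import org.apache.flink.table.data.utils.JoinedRowData;

public class WindowPropertiesSketch {
    public static void main(String[] args) {
        RowData input = GenericRowData.of(7L);

        long windowStart = 1_700_000_000_000L;
        long windowEnd = windowStart + Duration.ofMinutes(5).toMillis();

        // Reusable three-column metadata row: window_start, window_end, window_time.
        GenericRowData windowProperties = new GenericRowData(3);
        windowProperties.setField(0, TimestampData.fromEpochMillis(windowStart));
        windowProperties.setField(1, TimestampData.fromEpochMillis(windowEnd));
        windowProperties.setField(2, TimestampData.fromEpochMillis(windowEnd - 1));

        RowData out = new JoinedRowData().replace(input, windowProperties);
        System.out.println(out.getArity()); // 4: one payload column plus three window columns
    }
}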
Use of org.apache.flink.table.data.utils.JoinedRowData in project flink by apache.
The class SumHashAggTestOperator, method endInput.
public void endInput() throws Exception {
    StreamRecord<RowData> outElement = new StreamRecord<>(null);
    JoinedRowData hashAggOutput = new JoinedRowData();
    GenericRowData aggValueOutput = new GenericRowData(1);
    if (sorter == null) {
        // no spilling, output by iterating over the aggregate map
        KeyValueIterator<BinaryRowData, BinaryRowData> iter =
                aggregateMap.getEntryIterator(false);
        while (iter.advanceNext()) {
            // set result and output
            aggValueOutput.setField(
                    0, iter.getValue().isNullAt(0) ? null : iter.getValue().getLong(0));
            hashAggOutput.replace(iter.getKey(), aggValueOutput);
            getOutput().collect(outElement.replace(hashAggOutput));
        }
    } else {
        // spill the last part of the input's aggregation output buffer
        sorter.sortAndSpill(
                aggregateMap.getRecordAreaMemorySegments(),
                aggregateMap.getNumElements(),
                new BytesHashMapSpillMemorySegmentPool(aggregateMap.getBucketAreaMemorySegments()));
        // only release non-data memory in advance
        aggregateMap.free(true);
        // fall back to sort-based aggregation
        BinaryRowData lastKey = null;
        JoinedRowData fallbackInput = new JoinedRowData();
        boolean aggSumIsNull = false;
        long aggSum = -1;
        // the hash map memory is freed, but not yet released back to the memory manager
        MutableObjectIterator<Tuple2<BinaryRowData, BinaryRowData>> iterator =
                sorter.getKVIterator();
        Tuple2<BinaryRowData, BinaryRowData> kv;
        while ((kv = iterator.next()) != null) {
            BinaryRowData key = kv.f0;
            BinaryRowData value = kv.f1;
            // prepare input
            fallbackInput.replace(key, value);
            if (lastKey == null) {
                // found first key group
                lastKey = key.copy();
                aggSumIsNull = true;
                aggSum = -1L;
            } else if (key.getSizeInBytes() != lastKey.getSizeInBytes()
                    || !BinaryRowDataUtil.byteArrayEquals(
                            key.getSegments()[0].getArray(),
                            lastKey.getSegments()[0].getArray(),
                            key.getSizeInBytes())) {
                // output current group aggregate result
                aggValueOutput.setField(0, aggSumIsNull ? null : aggSum);
                hashAggOutput.replace(lastKey, aggValueOutput);
                getOutput().collect(outElement.replace(hashAggOutput));
                // found new group
                lastKey = key.copy();
                aggSumIsNull = true;
                aggSum = -1L;
            }
            if (!fallbackInput.isNullAt(1)) {
                long sumInput = fallbackInput.getLong(1);
                if (aggSumIsNull) {
                    aggSum = sumInput;
                } else {
                    aggSum = aggSum + sumInput;
                }
                aggSumIsNull = false;
            }
        }
        // output the last key group's aggregate result
        aggValueOutput.setField(0, aggSumIsNull ? null : aggSum);
        hashAggOutput.replace(lastKey, aggValueOutput);
        getOutput().collect(outElement.replace(hashAggOutput));
    }
}
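One detail worth keeping in mind with this reuse pattern: replace() mutates the JoinedRowData in place and returns the same instance, so each emitted record has to be consumed (copied or serialized by the collector) before the next replace() call, as in the per-group emit loop above. A small hedged sketch of that behavior; class name and values are illustrative only.

import org.apache.flink.table.data.GenericRowData;
import org.apache.flink.table.data.RowData;
import org.apache.flink.table.data.utils.JoinedRowData;

public class JoinedRowDataMutationSketch {
    public static void main(String[] args) {
        JoinedRowData reuse = new JoinedRowData();

        RowData first = reuse.replace(GenericRowData.of(1L), GenericRowData.of(10L));
        System.out.println(first.getLong(0)); // 1

        // The second replace() re-points the same instance at new rows ...
        reuse.replace(GenericRowData.of(2L), GenericRowData.of(20L));

        // ... so the earlier reference now reads the new values.
        System.out.println(first.getLong(0)); // 2
    }
}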