Use of org.apache.flink.table.data.utils.JoinedRowData in project flink by apache.
The class RowDataKinesisDeserializationSchema, method deserialize.
@Override
public RowData deserialize(
        byte[] recordValue,
        String partitionKey,
        String seqNum,
        long approxArrivalTimestamp,
        String stream,
        String shardId)
        throws IOException {
    RowData physicalRow = physicalDeserializer.deserialize(recordValue);
    GenericRowData metadataRow = new GenericRowData(requestedMetadataFields.size());
    for (int i = 0; i < metadataRow.getArity(); i++) {
        Metadata metadataField = requestedMetadataFields.get(i);
        if (metadataField == Metadata.Timestamp) {
            metadataRow.setField(i, TimestampData.fromEpochMillis(approxArrivalTimestamp));
        } else if (metadataField == Metadata.SequenceNumber) {
            metadataRow.setField(i, StringData.fromString(seqNum));
        } else if (metadataField == Metadata.ShardId) {
            metadataRow.setField(i, StringData.fromString(shardId));
        } else {
            // should never happen: all supported metadata keys are handled above
            String msg = String.format("Unsupported metadata key %s", metadataField);
            throw new RuntimeException(msg);
        }
    }
    return new JoinedRowData(physicalRow.getRowKind(), physicalRow, metadataRow);
}
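JoinedRowData never copies fields: it resolves each index against the first row's arity and delegates to whichever backing row owns that position. A minimal sketch of that lookup behavior, using illustrative values rather than anything from the Kinesis connector:

import org.apache.flink.table.data.GenericRowData;
import org.apache.flink.table.data.StringData;
import org.apache.flink.table.data.TimestampData;
import org.apache.flink.table.data.utils.JoinedRowData;
import org.apache.flink.types.RowKind;

public class JoinedRowDataLookupSketch {
    public static void main(String[] args) {
        // physical payload row (arity 1) followed by a metadata row (arity 2)
        GenericRowData physical = GenericRowData.of(StringData.fromString("payload"));
        GenericRowData metadata =
                GenericRowData.of(
                        TimestampData.fromEpochMillis(1_600_000_000_000L),
                        StringData.fromString("seq-42"));
        JoinedRowData joined = new JoinedRowData(RowKind.INSERT, physical, metadata);
        // index 0 resolves to the physical row; indexes >= physical.getArity()
        // resolve to the metadata row
        System.out.println(joined.getString(0));       // payload
        System.out.println(joined.getTimestamp(1, 3)); // arrival timestamp
        System.out.println(joined.getString(2));       // seq-42
    }
}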
Use of org.apache.flink.table.data.utils.JoinedRowData in project flink by apache.
The class PythonTableFunctionOperator, method open.
@Override
@SuppressWarnings("unchecked")
public void open() throws Exception {
    super.open();
    rowDataWrapper = new StreamRecordRowDataWrappingCollector(output);
    reuseJoinedRow = new JoinedRowData();
    udtfInputProjection =
            udtfInputGeneratedProjection.newInstance(
                    Thread.currentThread().getContextClassLoader());
    forwardedInputSerializer = new RowDataSerializer(inputType);
    udtfInputTypeSerializer = PythonTypeUtils.toInternalSerializer(udfInputType);
    udtfOutputTypeSerializer = PythonTypeUtils.toInternalSerializer(udfOutputType);
    input = null;
    hasJoined = false;
    isFinishResult = true;
}
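The reuseJoinedRow created here follows a common Flink pattern: one JoinedRowData instance is allocated in open() and re-pointed at each input/result pair via replace(), so nothing is allocated per record on the hot path. A hedged sketch of that pattern with illustrative single-field rows standing in for the input and the UDTF result:

import java.util.List;
import org.apache.flink.table.data.GenericRowData;
import org.apache.flink.table.data.RowData;
import org.apache.flink.table.data.StringData;
import org.apache.flink.table.data.utils.JoinedRowData;

public class ReuseJoinedRowSketch {
    public static void main(String[] args) {
        JoinedRowData reuse = new JoinedRowData(); // allocated once, as in open()
        List<RowData> inputs =
                List.of(
                        GenericRowData.of(StringData.fromString("a")),
                        GenericRowData.of(StringData.fromString("b")));
        for (RowData input : inputs) {
            // stand-in for a UDTF result row
            RowData result = GenericRowData.of(StringData.fromString("udtf-out"));
            // replace() re-points the wrapper at the new pair without allocating
            reuse.replace(input, result);
            System.out.println(reuse.getString(0) + " -> " + reuse.getString(1));
        }
    }
}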
Use of org.apache.flink.table.data.utils.JoinedRowData in project flink by apache.
The class AbstractArrowPythonAggregateFunctionOperator, method open.
@SuppressWarnings("unchecked")
@Override
public void open() throws Exception {
    super.open();
    rowDataWrapper = new StreamRecordRowDataWrappingCollector(output);
    reuseJoinedRow = new JoinedRowData();
    udafInputProjection =
            udafInputGeneratedProjection.newInstance(
                    Thread.currentThread().getContextClassLoader());
    arrowSerializer = new ArrowSerializer(udfInputType, udfOutputType);
    arrowSerializer.open(bais, baos);
    currentBatchCount = 0;
}
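After this setup, the operator's emit path typically pairs a forwarded input row with the Python UDAF's result row through reuseJoinedRow; the exact emit logic lives elsewhere in the class. A sketch of that step with invented rows, carrying the input's RowKind over to the joined result:

import org.apache.flink.table.data.GenericRowData;
import org.apache.flink.table.data.utils.JoinedRowData;

public class EmitJoinedRowSketch {
    public static void main(String[] args) {
        JoinedRowData reuseJoinedRow = new JoinedRowData();
        GenericRowData input = GenericRowData.of(1L);       // stand-in input row
        GenericRowData udafResult = GenericRowData.of(42L); // stand-in UDAF result
        reuseJoinedRow.replace(input, udafResult);
        // the joined row's kind is set explicitly from the input row
        reuseJoinedRow.setRowKind(input.getRowKind());
        System.out.println(reuseJoinedRow.getLong(0) + ", " + reuseJoinedRow.getLong(1));
    }
}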
Use of org.apache.flink.table.data.utils.JoinedRowData in project flink by apache.
The class AbstractTopNFunction, method open.
@Override
public void open(Configuration parameters) throws Exception {
    super.open(parameters);
    outputRow = new JoinedRowData();
    if (!isConstantRankEnd) {
        ValueStateDescriptor<Long> rankStateDesc =
                new ValueStateDescriptor<>("rankEnd", Types.LONG);
        if (ttlConfig.isEnabled()) {
            rankStateDesc.enableTimeToLive(ttlConfig);
        }
        rankEndState = getRuntimeContext().getState(rankStateDesc);
    }
    // compile the comparator and release the generated code holder
    sortKeyComparator =
            generatedSortKeyComparator.newInstance(
                    getRuntimeContext().getUserCodeClassLoader());
    generatedSortKeyComparator = null;
    invalidCounter = getRuntimeContext().getMetricGroup().counter("topn.invalidTopSize");
    // initialize rankEndFetcher
    if (!isConstantRankEnd) {
        LogicalType rankEndIdxType = inputRowType.toRowFieldTypes()[rankEndIndex];
        switch (rankEndIdxType.getTypeRoot()) {
            case BIGINT:
                rankEndFetcher = (RowData row) -> row.getLong(rankEndIndex);
                break;
            case INTEGER:
                rankEndFetcher = (RowData row) -> (long) row.getInt(rankEndIndex);
                break;
            case SMALLINT:
                rankEndFetcher = (RowData row) -> (long) row.getShort(rankEndIndex);
                break;
            default:
                LOG.error(
                        "variable rank index column must be long, short or int type, while input type is {}",
                        rankEndIdxType.getClass().getName());
                throw new UnsupportedOperationException(
                        "variable rank index column must be long, short or int type, while input type is "
                                + rankEndIdxType.getClass().getName());
        }
    }
}
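The switch exists because the rank-end column can arrive as BIGINT, INTEGER, or SMALLINT, while the rest of the function works with a single long. A sketch of the same widening, with java.util.function.Function standing in for the fetcher's functional interface and an illustrative field index and value:

import java.util.function.Function;
import org.apache.flink.table.data.GenericRowData;
import org.apache.flink.table.data.RowData;

public class RankEndFetcherSketch {
    public static void main(String[] args) {
        final int rankEndIndex = 0;
        // widen an INTEGER rank-end column to long, mirroring the INTEGER branch
        Function<RowData, Long> rankEndFetcher =
                (RowData row) -> (long) row.getInt(rankEndIndex);
        RowData row = GenericRowData.of(10);
        System.out.println(rankEndFetcher.apply(row)); // 10
    }
}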
Use of org.apache.flink.table.data.utils.JoinedRowData in project flink by apache.
The class AbstractRowTimeUnboundedPrecedingOver, method open.
@Override
public void open(Configuration parameters) throws Exception {
    function = genAggsHandler.newInstance(getRuntimeContext().getUserCodeClassLoader());
    function.open(new PerKeyStateDataViewStore(getRuntimeContext()));
    output = new JoinedRowData();
    sortedTimestamps = new LinkedList<Long>();
    // initialize accumulator state
    InternalTypeInfo<RowData> accTypeInfo = InternalTypeInfo.ofFields(accTypes);
    ValueStateDescriptor<RowData> accStateDesc =
            new ValueStateDescriptor<RowData>("accState", accTypeInfo);
    accState = getRuntimeContext().getState(accStateDesc);
    // input elements are all binary rows, as they arrive from the network
    InternalTypeInfo<RowData> inputType = InternalTypeInfo.ofFields(inputFieldTypes);
    ListTypeInfo<RowData> rowListTypeInfo = new ListTypeInfo<RowData>(inputType);
    MapStateDescriptor<Long, List<RowData>> inputStateDesc =
            new MapStateDescriptor<Long, List<RowData>>("inputState", Types.LONG, rowListTypeInfo);
    inputState = getRuntimeContext().getMapState(inputStateDesc);
    initCleanupTimeState("RowTimeUnboundedOverCleanupTime");
    // metrics
    this.numLateRecordsDropped =
            getRuntimeContext().getMetricGroup().counter(LATE_ELEMENTS_DROPPED_METRIC_NAME);
}
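Downstream of this open(), rows are buffered in inputState keyed by event timestamp, and the output JoinedRowData pairs each input row with the accumulated aggregate values when a timer fires. A sketch of that joined emit with invented rows; in the operator the aggregate values come from the generated aggs handler:

import org.apache.flink.table.data.GenericRowData;
import org.apache.flink.table.data.RowData;
import org.apache.flink.table.data.StringData;
import org.apache.flink.table.data.utils.JoinedRowData;

public class OverAggEmitSketch {
    public static void main(String[] args) {
        JoinedRowData output = new JoinedRowData(); // created once in open()
        GenericRowData input = GenericRowData.of(StringData.fromString("key"), 5L);
        GenericRowData aggValues = GenericRowData.of(100L); // e.g. a running sum
        // one joined row per input: forwarded fields first, aggregates after
        RowData result = output.replace(input, aggValues);
        System.out.println(
                result.getString(0) + ", " + result.getLong(1) + ", " + result.getLong(2));
    }
}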