Search in sources :

Example 1 with IntNormalizedKeyComputer

Example use of org.apache.flink.table.runtime.operators.sort.IntNormalizedKeyComputer in the Apache Flink project.

From the class SumHashAggTestOperator, method processElement:

@Override
public void processElement(StreamRecord<RowData> element) throws Exception {
    RowData in1 = element.getValue();

    // Project the group key (column 0, INT) from the input row into the reusable
    // key writer; a SQL NULL key is preserved as NULL in the serialized key row.
    currentKeyWriter.reset();
    if (in1.isNullAt(0)) {
        currentKeyWriter.setNullAt(0);
    } else {
        currentKeyWriter.writeInt(0, in1.getInt(0));
    }
    currentKeyWriter.complete();

    // Look up the aggregate buffer for the current group key.
    BytesMap.LookupInfo<BinaryRowData, BinaryRowData> lookupInfo = aggregateMap.lookup(currentKey);
    BinaryRowData currentAggBuffer = lookupInfo.getValue();
    if (!lookupInfo.isFound()) {
        try {
            // First occurrence of this key: append an empty agg buffer for it.
            currentAggBuffer = aggregateMap.append(lookupInfo, emptyAggBuffer);
        } catch (EOFException exp) {
            // The hash map is out of memory: sort the in-memory records and spill
            // them to the external sorter, then reset the map and retry the append.
            if (sorter == null) {
                sorter =
                        new BufferedKVExternalSorter(
                                getIOManager(),
                                new BinaryRowDataSerializer(keyTypes.length),
                                new BinaryRowDataSerializer(aggBufferTypes.length),
                                new IntNormalizedKeyComputer(),
                                new IntRecordComparator(),
                                getMemoryManager().getPageSize(),
                                getConf());
            }
            sorter.sortAndSpill(
                    aggregateMap.getRecordAreaMemorySegments(),
                    aggregateMap.getNumElements(),
                    new BytesHashMapSpillMemorySegmentPool(
                            aggregateMap.getBucketAreaMemorySegments()));
            // Reset the aggregate map and retry the append with a fresh lookup
            // (the old lookupInfo is invalid after reset).
            aggregateMap.reset();
            lookupInfo = aggregateMap.lookup(currentKey);
            try {
                currentAggBuffer = aggregateMap.append(lookupInfo, emptyAggBuffer);
            } catch (EOFException e) {
                // A single record must fit into a freshly reset map; if it does not,
                // the configured memory is fundamentally insufficient. Preserve the
                // original failure as the cause (OutOfMemoryError has no
                // (String, Throwable) constructor, so use initCause).
                OutOfMemoryError oom = new OutOfMemoryError("BytesHashMap Out of Memory.");
                oom.initCause(e);
                throw oom;
            }
        }
    }

    // Accumulate SUM over column 1 (BIGINT) into the agg buffer. NULL inputs are
    // skipped; a NULL buffer is initialized with the first non-null value.
    if (!in1.isNullAt(1)) {
        long sumInput = in1.getLong(1);
        if (currentAggBuffer.isNullAt(0)) {
            currentAggBuffer.setLong(0, sumInput);
        } else {
            currentAggBuffer.setLong(0, sumInput + currentAggBuffer.getLong(0));
        }
    }
}
Also used : IntRecordComparator(org.apache.flink.table.runtime.operators.sort.IntRecordComparator) GenericRowData(org.apache.flink.table.data.GenericRowData) RowData(org.apache.flink.table.data.RowData) BinaryRowData(org.apache.flink.table.data.binary.BinaryRowData) JoinedRowData(org.apache.flink.table.data.utils.JoinedRowData) IntNormalizedKeyComputer(org.apache.flink.table.runtime.operators.sort.IntNormalizedKeyComputer) BytesMap(org.apache.flink.table.runtime.util.collections.binary.BytesMap) BinaryRowData(org.apache.flink.table.data.binary.BinaryRowData) EOFException(java.io.EOFException) BufferedKVExternalSorter(org.apache.flink.table.runtime.operators.sort.BufferedKVExternalSorter) BinaryRowDataSerializer(org.apache.flink.table.runtime.typeutils.BinaryRowDataSerializer)

Aggregations

EOFException (java.io.EOFException)1 GenericRowData (org.apache.flink.table.data.GenericRowData)1 RowData (org.apache.flink.table.data.RowData)1 BinaryRowData (org.apache.flink.table.data.binary.BinaryRowData)1 JoinedRowData (org.apache.flink.table.data.utils.JoinedRowData)1 BufferedKVExternalSorter (org.apache.flink.table.runtime.operators.sort.BufferedKVExternalSorter)1 IntNormalizedKeyComputer (org.apache.flink.table.runtime.operators.sort.IntNormalizedKeyComputer)1 IntRecordComparator (org.apache.flink.table.runtime.operators.sort.IntRecordComparator)1 BinaryRowDataSerializer (org.apache.flink.table.runtime.typeutils.BinaryRowDataSerializer)1 BytesMap (org.apache.flink.table.runtime.util.collections.binary.BytesMap)1