Search in sources:

Example 26 with BinaryRowData

use of org.apache.flink.table.data.binary.BinaryRowData in project flink by apache.

The class RandomSortMergeInnerJoinTest, method newRow:

/**
 * Builds a three-field binary row: an int in field 0 and two nullable strings
 * in fields 1 and 2.
 *
 * @param i value written to field 0
 * @param s1 value for field 1; {@code null} marks the field as null
 * @param s2 value for field 2; {@code null} marks the field as null
 * @return the completed {@link BinaryRowData}
 */
public static BinaryRowData newRow(int i, String s1, String s2) {
    BinaryRowData row = new BinaryRowData(3);
    BinaryRowWriter writer = new BinaryRowWriter(row);
    writer.writeInt(0, i);
    writeNullableString(writer, 1, s1);
    writeNullableString(writer, 2, s2);
    writer.complete();
    return row;
}

/** Writes {@code s} at {@code pos}, or marks the field null when {@code s} is null. */
private static void writeNullableString(BinaryRowWriter writer, int pos, String s) {
    if (s == null) {
        writer.setNullAt(pos);
    } else {
        writer.writeString(pos, StringData.fromString(s));
    }
}
Also used : BinaryRowData(org.apache.flink.table.data.binary.BinaryRowData) BinaryRowWriter(org.apache.flink.table.data.writer.BinaryRowWriter)

Example 27 with BinaryRowData

use of org.apache.flink.table.data.binary.BinaryRowData in project flink by apache.

The class BinaryKVInMemorySortBuffer, method load:

/**
 * Re-reads {@code numElements} key/value records from {@code recordInputView} and rebuilds
 * the in-memory sort index for them, one index entry (pointer + normalized key) per record.
 *
 * <p>Statement order is significant: the record pointer must be captured after the
 * fixed-length-part skip check but before the key is deserialized, because it is the
 * position the index entry will later point back to.
 *
 * @param numElements number of key/value records to index
 * @param recordInputView view positioned at the first record to load
 * @throws IOException if reading from the input view fails
 */
private void load(long numElements, RandomAccessInputView recordInputView) throws IOException {
    for (int index = 0; index < numElements; index++) {
        // Align the read position past any fixed-length-part padding before reading the key.
        serializer.checkSkipReadForFixLengthPart(recordInputView);
        long pointer = recordInputView.getReadPosition();
        // NOTE(review): the key is skip-checked via `serializer` but deserialized via
        // `serializer1` into the reusable `row1` — presumably both are key serializers;
        // confirm against the field declarations.
        BinaryRowData row = serializer1.mapFromPages(row1, recordInputView);
        valueSerializer.checkSkipReadForFixLengthPart(recordInputView);
        // The value is not needed for indexing: read its length and skip over its bytes.
        recordInputView.skipBytes(recordInputView.readInt());
        boolean success = checkNextIndexOffset();
        checkArgument(success);
        writeIndexAndNormalizedKey(row, pointer);
    }
}
Also used : BinaryRowData(org.apache.flink.table.data.binary.BinaryRowData)

Example 28 with BinaryRowData

use of org.apache.flink.table.data.binary.BinaryRowData in project flink by apache.

The class BinaryHashTable, method tryProbe:

// ========================== probe phase public method ======================================
/**
 * Find matched build side rows for a probe row.
 *
 * @return return false if the target partition has spilled, we will spill this probe row too.
 *     The row will be re-match in rebuild phase.
 */
public boolean tryProbe(RowData record) throws IOException {
    if (!this.probeIterator.hasSource()) {
        // set the current probe value when probeIterator is null at the beginning.
        this.probeIterator.setInstance(record);
    }
    // calculate the hash: the projected key's hashCode is mixed with the current
    // recursion depth so that re-partitioning after a spill uses a fresh hash.
    BinaryRowData probeKey = probeSideProjection.apply(record);
    final int hash = hash(probeKey.hashCode(), this.currentRecursionDepth);
    BinaryHashPartition p = this.partitionsBeingBuilt.get(hash % partitionsBeingBuilt.size());
    // records
    if (p.isInMemory()) {
        // Partition is resident: stash the key/row and start the bucket lookup;
        // the caller can iterate matches immediately.
        this.probeKey = probeKey;
        this.probeRow = record;
        p.bucketArea.startLookup(hash);
        return true;
    } else {
        // Partition has spilled: buffer the probe row (unless the bloom filter
        // proves no build-side match exists) and signal the caller to move on.
        if (p.testHashBloomFilter(hash)) {
            BinaryRowData row = originProbeSideSerializer.toBinaryRow(record);
            p.insertIntoProbeBuffer(row);
        }
        return false;
    }
}
Also used : BinaryRowData(org.apache.flink.table.data.binary.BinaryRowData)

Example 29 with BinaryRowData

use of org.apache.flink.table.data.binary.BinaryRowData in project flink by apache.

The class BinaryHashTable, method applyCondition:

/**
 * Checks whether a build-side candidate row matches the current probe row: the join
 * keys must be byte-equal, null keys are rejected when the join is not null-safe,
 * and any generated join condition must accept the pair.
 */
boolean applyCondition(BinaryRowData candidate) {
    BinaryRowData buildKey = buildSideProjection.apply(candidate);
    // Keys come from a Projection, so their bytes are guaranteed to be on the heap
    // in a single segment — compare them directly as byte arrays.
    boolean matches =
            buildKey.getSizeInBytes() == probeKey.getSizeInBytes()
                    && BinaryRowDataUtil.byteArrayEquals(
                            buildKey.getSegments()[0].getHeapMemory(),
                            probeKey.getSegments()[0].getHeapMemory(),
                            buildKey.getSizeInBytes());
    // TODO do null filter in advance?
    if (matches && !nullSafe) {
        boolean hasNullKey =
                filterAllNulls ? buildKey.anyNull() : buildKey.anyNull(nullFilterKeys);
        matches = !hasNullKey;
    }
    if (!matches || condFunc == null) {
        return matches;
    }
    // reverseJoin flips the argument order expected by the generated condition.
    return reverseJoin
            ? condFunc.apply(probeRow, candidate)
            : condFunc.apply(candidate, probeRow);
}
Also used : BinaryRowData(org.apache.flink.table.data.binary.BinaryRowData)

Example 30 with BinaryRowData

use of org.apache.flink.table.data.binary.BinaryRowData in project flink by apache.

The class AbstractBinaryWriter, method writeRow:

/**
 * Writes a nested row into the variable-length part at field {@code pos}.
 *
 * <p>A row already in binary layout is copied segment-wise without re-serialization;
 * any other {@link RowData} is first converted to binary form via {@code serializer}.
 *
 * @param pos field position to write to
 * @param input the nested row value
 * @param serializer used to binarize {@code input} when it is not already binary
 */
@Override
public void writeRow(int pos, RowData input, RowDataSerializer serializer) {
    // Both paths copy identical segment/offset/size data, so normalize to
    // BinaryFormat once instead of duplicating the write call per branch.
    BinaryFormat row =
            input instanceof BinaryFormat
                    ? (BinaryFormat) input
                    : serializer.toBinaryRow(input);
    writeSegmentsToVarLenPart(pos, row.getSegments(), row.getOffset(), row.getSizeInBytes());
}
Also used : BinaryRowData(org.apache.flink.table.data.binary.BinaryRowData) BinaryFormat(org.apache.flink.table.data.binary.BinaryFormat)

Aggregations

BinaryRowData (org.apache.flink.table.data.binary.BinaryRowData)173 Test (org.junit.Test)81 BinaryRowWriter (org.apache.flink.table.data.writer.BinaryRowWriter)54 RowData (org.apache.flink.table.data.RowData)31 ArrayList (java.util.ArrayList)30 MemoryManager (org.apache.flink.runtime.memory.MemoryManager)22 UniformBinaryRowGenerator (org.apache.flink.table.runtime.util.UniformBinaryRowGenerator)21 JoinedRowData (org.apache.flink.table.data.utils.JoinedRowData)16 MemorySegment (org.apache.flink.core.memory.MemorySegment)15 MutableObjectIterator (org.apache.flink.util.MutableObjectIterator)14 GenericRowData (org.apache.flink.table.data.GenericRowData)13 Random (java.util.Random)12 BinaryRowDataSerializer (org.apache.flink.table.runtime.typeutils.BinaryRowDataSerializer)12 HashMap (java.util.HashMap)9 RowDataSerializer (org.apache.flink.table.runtime.typeutils.RowDataSerializer)9 Map (java.util.Map)7 Tuple2 (org.apache.flink.api.java.tuple.Tuple2)7 StreamOperator (org.apache.flink.streaming.api.operators.StreamOperator)7 RandomAccessInputView (org.apache.flink.runtime.io.disk.RandomAccessInputView)6 StreamRecord (org.apache.flink.streaming.runtime.streamrecord.StreamRecord)6