Use of org.apache.flink.table.data.binary.BinaryRowData in project flink by apache.
The class RandomSortMergeInnerJoinTest, method newRow.
public static BinaryRowData newRow(int i, String s1, String s2) {
    BinaryRowData row = new BinaryRowData(3);
    BinaryRowWriter writer = new BinaryRowWriter(row);
    writer.writeInt(0, i);
    if (s1 == null) {
        writer.setNullAt(1);
    } else {
        writer.writeString(1, StringData.fromString(s1));
    }
    if (s2 == null) {
        writer.setNullAt(2);
    } else {
        writer.writeString(2, StringData.fromString(s2));
    }
    writer.complete();
    return row;
}
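For orientation, a minimal usage sketch reading such a row back through the RowData accessors (the field values are illustrative):

BinaryRowData row = newRow(42, "hello", null);
int id = row.getInt(0);                 // 42
StringData s1 = row.getString(1);       // "hello"
boolean s2IsNull = row.isNullAt(2);     // true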
Use of org.apache.flink.table.data.binary.BinaryRowData in project flink by apache.
The class BinaryKVInMemorySortBuffer, method load.
private void load(long numElements, RandomAccessInputView recordInputView) throws IOException {
    for (int index = 0; index < numElements; index++) {
        serializer.checkSkipReadForFixLengthPart(recordInputView);
        // Remember where the key record starts; the sort index stores this pointer.
        long pointer = recordInputView.getReadPosition();
        BinaryRowData row = serializer.mapFromPages(row1, recordInputView);
        valueSerializer.checkSkipReadForFixLengthPart(recordInputView);
        // The value is length-prefixed, so it can be skipped without deserialization.
        recordInputView.skipBytes(recordInputView.readInt());
        boolean success = checkNextIndexOffset();
        checkArgument(success);
        writeIndexAndNormalizedKey(row, pointer);
    }
}
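The skip works because the value is stored length-prefixed. A hypothetical, self-contained illustration of that idiom using plain java.io in place of Flink's RandomAccessInputView (the class name and byte array are made up for the example):

import java.io.ByteArrayInputStream;
import java.io.DataInputStream;
import java.io.IOException;

public class LengthPrefixedSkip {
    public static void main(String[] args) throws IOException {
        // One length-prefixed record: a 4-byte big-endian length, then the payload.
        byte[] data = {0, 0, 0, 3, 'a', 'b', 'c', 'X'};
        DataInputStream in = new DataInputStream(new ByteArrayInputStream(data));
        in.skipBytes(in.readInt()); // read the length, then skip the payload
        System.out.println((char) in.readByte()); // prints X
    }
}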
Use of org.apache.flink.table.data.binary.BinaryRowData in project flink by apache.
The class BinaryHashTable, method tryProbe.
// ========================== probe phase public method ======================================

/**
 * Find matched build side rows for a probe row.
 *
 * @return false if the target partition has spilled; in that case this probe row is spilled
 *     too and will be re-matched in the rebuild phase.
 */
public boolean tryProbe(RowData record) throws IOException {
    if (!this.probeIterator.hasSource()) {
        // set the current probe value when probeIterator is null at the beginning.
        this.probeIterator.setInstance(record);
    }
    // calculate the hash
    BinaryRowData probeKey = probeSideProjection.apply(record);
    final int hash = hash(probeKey.hashCode(), this.currentRecursionDepth);
    BinaryHashPartition p = this.partitionsBeingBuilt.get(hash % partitionsBeingBuilt.size());
    // for an in-memory partition, set up the lookup; otherwise spill the probe records
    if (p.isInMemory()) {
        this.probeKey = probeKey;
        this.probeRow = record;
        p.bucketArea.startLookup(hash);
        return true;
    } else {
        if (p.testHashBloomFilter(hash)) {
            BinaryRowData row = originProbeSideSerializer.toBinaryRow(record);
            p.insertIntoProbeBuffer(row);
        }
        return false;
    }
}
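Stripped of Flink internals, the routing decision is: pick the partition by hash modulo the partition count; probe in-memory partitions immediately; otherwise buffer the row for the rebuild phase unless the bloom filter rules it out. A hypothetical, self-contained sketch (Partition and ProbeRoutingSketch are stand-ins for BinaryHashPartition's internals, not Flink API):

import java.util.List;

interface Partition {
    boolean isInMemory();
    boolean bloomFilterMightContain(int hash);
    void bufferProbeRow(Object row);
}

class ProbeRoutingSketch {
    // Mirrors the tryProbe control flow: true means "match now",
    // false means "spilled; re-match in the rebuild phase".
    // Assumes a non-negative hash, as the table's partition lookup implies.
    static boolean route(List<Partition> partitions, int hash, Object row) {
        Partition p = partitions.get(hash % partitions.size());
        if (p.isInMemory()) {
            return true;
        }
        if (p.bloomFilterMightContain(hash)) {
            p.bufferProbeRow(row);
        }
        return false;
    }
}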
Use of org.apache.flink.table.data.binary.BinaryRowData in project flink by apache.
The class BinaryHashTable, method applyCondition.
boolean applyCondition(BinaryRowData candidate) {
    BinaryRowData buildKey = buildSideProjection.apply(candidate);
    // Both keys come from a Projection, so they are guaranteed to sit in a single heap byte[].
    boolean equal =
            buildKey.getSizeInBytes() == probeKey.getSizeInBytes()
                    && BinaryRowDataUtil.byteArrayEquals(
                            buildKey.getSegments()[0].getHeapMemory(),
                            probeKey.getSegments()[0].getHeapMemory(),
                            buildKey.getSizeInBytes());
    // TODO do null filter in advance?
    if (!nullSafe) {
        equal = equal && !(filterAllNulls ? buildKey.anyNull() : buildKey.anyNull(nullFilterKeys));
    }
    return condFunc == null
            ? equal
            : equal
                    && (reverseJoin
                            ? condFunc.apply(probeRow, candidate)
                            : condFunc.apply(candidate, probeRow));
}
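The size-then-bytes comparison has a direct standard-library analogue; a minimal hypothetical sketch (keysEqual is made up; the ranged Arrays.equals needs Java 9+):

// Hypothetical analogue of the size-then-bytes key check in applyCondition.
static boolean keysEqual(byte[] buildKey, byte[] probeKey) {
    return buildKey.length == probeKey.length
            && java.util.Arrays.equals(buildKey, 0, buildKey.length, probeKey, 0, probeKey.length);
}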
Use of org.apache.flink.table.data.binary.BinaryRowData in project flink by apache.
The class AbstractBinaryWriter, method writeRow.
@Override
public void writeRow(int pos, RowData input, RowDataSerializer serializer) {
    if (input instanceof BinaryFormat) {
        // Already in binary layout: copy the underlying segments directly, no serialization.
        BinaryFormat row = (BinaryFormat) input;
        writeSegmentsToVarLenPart(pos, row.getSegments(), row.getOffset(), row.getSizeInBytes());
    } else {
        // Otherwise serialize to a BinaryRowData first, then copy its segments.
        BinaryRowData row = serializer.toBinaryRow(input);
        writeSegmentsToVarLenPart(pos, row.getSegments(), row.getOffset(), row.getSizeInBytes());
    }
}
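A minimal usage sketch of the second branch, nesting a non-binary GenericRowData inside a BinaryRowData (the logical types are illustrative; the serializer must match the nested row's types):

BinaryRowData outer = new BinaryRowData(1);
BinaryRowWriter writer = new BinaryRowWriter(outer);
RowDataSerializer nestedSerializer = new RowDataSerializer(new IntType(), new VarCharType());
// GenericRowData is not a BinaryFormat, so writeRow serializes it first.
writer.writeRow(0, GenericRowData.of(1, StringData.fromString("x")), nestedSerializer);
writer.complete();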