
Example 6 with ChannelWithMeta

Use of org.apache.flink.table.runtime.io.ChannelWithMeta in project flink by apache.

From the class ResettableExternalBuffer, method spill().

private void spill() throws IOException {
    FileIOChannel.ID channel = ioManager.createChannel();
    final BlockChannelWriter<MemorySegment> writer = this.ioManager.createBlockChannelWriter(channel);
    int numRecordBuffers = inMemoryBuffer.getNumRecordBuffers();
    ArrayList<MemorySegment> segments = inMemoryBuffer.getRecordBufferSegments();
    try {
        // spill in memory buffer in zero-copy.
        for (int i = 0; i < numRecordBuffers; i++) {
            writer.writeBlock(segments.get(i));
        }
        LOG.info("here spill the reset buffer data with {} bytes", writer.getSize());
        writer.close();
    } catch (IOException e) {
        writer.closeAndDelete();
        throw e;
    }
    spillSize += numRecordBuffers * segmentSize;
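    // record the spilled channel together with its block count and the number of valid bytes in the last block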
    spilledChannelIDs.add(new ChannelWithMeta(channel, inMemoryBuffer.getNumRecordBuffers(), inMemoryBuffer.getNumBytesInLastBuffer()));
    this.spilledChannelRowOffsets.add(numRows);
    inMemoryBuffer.reset();
}
Also used: FileIOChannel (org.apache.flink.runtime.io.disk.iomanager.FileIOChannel), IOException (java.io.IOException), ChannelWithMeta (org.apache.flink.table.runtime.io.ChannelWithMeta), MemorySegment (org.apache.flink.core.memory.MemorySegment)
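
As a follow-up, a minimal sketch of reading such a spilled channel back: the helper below is hypothetical (it is not part of ResettableExternalBuffer), and it assumes the surrounding class holds the same ioManager, compressionEnable, compressionCodecFactory, compressionBlockSize and segmentSize fields that Example 7 passes to FileChannelUtil.createInputView.

private AbstractChannelReaderInputView openSpilledChannel(ChannelWithMeta channelWithMeta) throws IOException {
    // The block count and the number of bytes in the last block recorded during spill()
    // tell the reader exactly how much valid data the spilled channel holds.
    return FileChannelUtil.createInputView(
            ioManager,
            channelWithMeta,
            new ArrayList<>(),
            compressionEnable,
            compressionCodecFactory,
            compressionBlockSize,
            segmentSize);
}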

Example 7 with ChannelWithMeta

Use of org.apache.flink.table.runtime.io.ChannelWithMeta in project flink by apache.

From the class LongHybridHashTable, method prepareNextPartition().

private boolean prepareNextPartition() throws IOException {
    // finalize and cleanup the partitions of the current table
    for (final LongHashPartition p : this.partitionsBeingBuilt) {
        p.finalizeProbePhase(this.partitionsPending);
    }
    this.partitionsBeingBuilt.clear();
    if (this.currentSpilledProbeSide != null) {
        this.currentSpilledProbeSide.getChannel().closeAndDelete();
        this.currentSpilledProbeSide = null;
    }
    if (this.partitionsPending.isEmpty()) {
        // no more data
        return false;
    }
    // there are pending partitions
    final LongHashPartition p = this.partitionsPending.get(0);
    LOG.info(String.format("Begin to process spilled partition [%d]", p.getPartitionNumber()));
    if (p.probeSideRecordCounter == 0) {
        this.partitionsPending.remove(0);
        return prepareNextPartition();
    }
    // build the next table; memory must be allocated after this call
    buildTableFromSpilledPartition(p);
    // set the probe side
    ChannelWithMeta channelWithMeta =
            new ChannelWithMeta(
                    p.probeSideBuffer.getChannel().getChannelID(),
                    p.probeSideBuffer.getBlockCount(),
                    p.probeNumBytesInLastSeg);
    this.currentSpilledProbeSide =
            FileChannelUtil.createInputView(
                    ioManager,
                    channelWithMeta,
                    new ArrayList<>(),
                    compressionEnable,
                    compressionCodecFactory,
                    compressionBlockSize,
                    segmentSize);
    ChannelReaderInputViewIterator<BinaryRowData> probeReader =
            new ChannelReaderInputViewIterator(
                    this.currentSpilledProbeSide, new ArrayList<>(), this.probeSideSerializer);
    this.probeIterator.set(probeReader);
    this.probeIterator.setReuse(probeSideSerializer.createInstance());
    // unregister the pending partition
    this.partitionsPending.remove(0);
    this.currentRecursionDepth = p.getRecursionLevel() + 1;
    // recursively get the next
    return nextMatching();
}
Also used: ArrayList (java.util.ArrayList), BinaryRowData (org.apache.flink.table.data.binary.BinaryRowData), ChannelReaderInputViewIterator (org.apache.flink.runtime.io.disk.ChannelReaderInputViewIterator), ChannelWithMeta (org.apache.flink.table.runtime.io.ChannelWithMeta)
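
For orientation, a minimal sketch of how a probe reader built this way is typically drained. The loop is hypothetical and not part of LongHybridHashTable; it uses only MutableObjectIterator#next(reuse), which returns null once the spilled probe side is exhausted, together with the probeSideSerializer shown above.

BinaryRowData reuse = probeSideSerializer.createInstance();
BinaryRowData record;
// next(reuse) deserializes the next spilled probe-side row into the reused instance,
// or returns null when the channel has been fully consumed.
while ((record = probeReader.next(reuse)) != null) {
    // probe the freshly built in-memory table with this row
}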

Aggregations (classes appearing across these examples, with usage counts)

ChannelWithMeta (org.apache.flink.table.runtime.io.ChannelWithMeta): 7
ArrayList (java.util.ArrayList): 4
IOException (java.io.IOException): 3
FileIOChannel (org.apache.flink.runtime.io.disk.iomanager.FileIOChannel): 3
ChannelReaderInputViewIterator (org.apache.flink.runtime.io.disk.ChannelReaderInputViewIterator): 2
AbstractChannelWriterOutputView (org.apache.flink.runtime.io.disk.iomanager.AbstractChannelWriterOutputView): 2
BinaryRowData (org.apache.flink.table.data.binary.BinaryRowData): 2
File (java.io.File): 1
MemorySegment (org.apache.flink.core.memory.MemorySegment): 1
AbstractChannelReaderInputView (org.apache.flink.runtime.io.disk.iomanager.AbstractChannelReaderInputView): 1
BinaryRowChannelInputViewIterator (org.apache.flink.table.runtime.io.BinaryRowChannelInputViewIterator): 1
MutableObjectIterator (org.apache.flink.util.MutableObjectIterator): 1