Example 16 with MemorySegment

use of org.apache.flink.core.memory.MemorySegment in project flink by apache.

the class DataInputOutputSerializerTest method testWrapAsByteBuffer.

@Test
public void testWrapAsByteBuffer() {
    SerializationTestType randomInt = Util.randomRecord(SerializationTestTypeFactory.INT);
    DataOutputSerializer serializer = new DataOutputSerializer(randomInt.length());
    MemorySegment segment = MemorySegmentFactory.allocateUnpooledSegment(randomInt.length());
    try {
        // empty buffer, read buffer should be empty
        ByteBuffer wrapper = serializer.wrapAsByteBuffer();
        Assert.assertEquals(0, wrapper.position());
        Assert.assertEquals(0, wrapper.limit());
        // write to data output, read buffer should still be empty
        randomInt.write(serializer);
        Assert.assertEquals(0, wrapper.position());
        Assert.assertEquals(0, wrapper.limit());
        // get updated read buffer, read buffer should contain written data
        wrapper = serializer.wrapAsByteBuffer();
        Assert.assertEquals(0, wrapper.position());
        Assert.assertEquals(randomInt.length(), wrapper.limit());
        // clear data output, read buffer should still contain written data
        serializer.clear();
        Assert.assertEquals(0, wrapper.position());
        Assert.assertEquals(randomInt.length(), wrapper.limit());
        // get updated read buffer, should be empty
        wrapper = serializer.wrapAsByteBuffer();
        Assert.assertEquals(0, wrapper.position());
        Assert.assertEquals(0, wrapper.limit());
        // write to data output and read back to memory
        randomInt.write(serializer);
        wrapper = serializer.wrapAsByteBuffer();
        segment.put(0, wrapper, randomInt.length());
        Assert.assertEquals(randomInt.length(), wrapper.position());
        Assert.assertEquals(randomInt.length(), wrapper.limit());
    } catch (IOException e) {
        e.printStackTrace();
        Assert.fail("Test encountered an unexpected exception.");
    }
}
Also used : SerializationTestType(org.apache.flink.runtime.io.network.api.serialization.types.SerializationTestType) IOException(java.io.IOException) ByteBuffer(java.nio.ByteBuffer) MemorySegment(org.apache.flink.core.memory.MemorySegment) Test(org.junit.Test)
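The ByteBuffer returned by wrapAsByteBuffer() is a view over whatever the serializer has written at the time of the call, which is why the test re-wraps after each write and after clear(). Bytes copied into a MemorySegment this way can be deserialized again with a DataInputDeserializer, which uses the same big-endian encoding as DataOutputSerializer. The following minimal round-trip sketch is an illustration, not code from the Flink sources: the value 42, the 4-byte sizes, and the class name are assumptions, and the imports assume the org.apache.flink.core.memory location of the serializer classes used by recent Flink versions (older releases keep them under org.apache.flink.runtime.util).

import java.nio.ByteBuffer;

import org.apache.flink.core.memory.DataInputDeserializer;
import org.apache.flink.core.memory.DataOutputSerializer;
import org.apache.flink.core.memory.MemorySegment;
import org.apache.flink.core.memory.MemorySegmentFactory;

public class RoundTripSketch {

    public static void main(String[] args) throws Exception {
        // serialize an int (4 bytes) into the serializer's backing array
        DataOutputSerializer out = new DataOutputSerializer(4);
        out.writeInt(42);

        // wrap the written bytes and copy them into an unpooled memory segment
        ByteBuffer wrapper = out.wrapAsByteBuffer();
        MemorySegment segment = MemorySegmentFactory.allocateUnpooledSegment(4);
        segment.put(0, wrapper, 4);

        // copy the bytes back out of the segment and deserialize them again
        byte[] bytes = new byte[4];
        segment.get(0, bytes);
        int value = new DataInputDeserializer(bytes).readInt();
        System.out.println(value); // prints 42
    }
}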

Example 17 with MemorySegment

use of org.apache.flink.core.memory.MemorySegment in project flink by apache.

the class SerializedUpdateBuffer method switchBuffers.

public ReadEnd switchBuffers() throws IOException {
    // remove exhausted read ends
    for (int i = readEnds.size() - 1; i >= 0; --i) {
        final ReadEnd re = readEnds.get(i);
        if (re.disposeIfDone()) {
            readEnds.remove(i);
        }
    }
    // add the current memorySegment and reset this writer
    final MemorySegment current = getCurrentSegment();
    current.putInt(0, getCurrentPositionInSegment());
    fullBuffers.addLast(current);
    // create the reader
    final ReadEnd readEnd;
    if (numBuffersSpilled == 0 && emptyBuffers.size() >= minBuffersForWriteEnd) {
        // read completely from in-memory segments
        readEnd = new ReadEnd(fullBuffers.removeFirst(), emptyBuffers, fullBuffers, null, null, 0);
    } else {
        int toSpill = Math.min(minBuffersForSpilledReadEnd + minBuffersForWriteEnd - emptyBuffers.size(), fullBuffers.size());
        // grab some empty buffers to re-read the first segment
        if (toSpill > 0) {
            // need to spill to make buffers available
            if (currentWriter == null) {
                currentWriter = ioManager.createBlockChannelWriter(channelEnumerator.next(), emptyBuffers);
            }
            for (int i = 0; i < toSpill; i++) {
                currentWriter.writeBlock(fullBuffers.removeFirst());
            }
            numBuffersSpilled += toSpill;
        }
        // now close the writer and create the reader
        currentWriter.close();
        final BlockChannelReader<MemorySegment> reader = ioManager.createBlockChannelReader(currentWriter.getChannelID());
        // gather some memory segments to circulate while reading back the data
        final List<MemorySegment> readSegments = new ArrayList<MemorySegment>();
        try {
            while (readSegments.size() < minBuffersForSpilledReadEnd) {
                readSegments.add(emptyBuffers.take());
            }
            // read the first segment
            MemorySegment firstSeg = readSegments.remove(readSegments.size() - 1);
            reader.readBlock(firstSeg);
            firstSeg = reader.getReturnQueue().take();
            // create the read end reading one less buffer, because the first buffer is already read back
            readEnd = new ReadEnd(firstSeg, emptyBuffers, fullBuffers, reader, readSegments, numBuffersSpilled - 1);
        } catch (InterruptedException e) {
            throw new RuntimeException("SerializedUpdateBuffer was interrupted while reclaiming memory by spilling.", e);
        }
    }
    // reset the writer
    fullBuffers = new ArrayDeque<MemorySegment>(64);
    currentWriter = null;
    numBuffersSpilled = 0;
    try {
        seekOutput(emptyBuffers.take(), HEADER_LENGTH);
    } catch (InterruptedException e) {
        throw new RuntimeException("SerializedUpdateBuffer was interrupted while reclaiming memory by spilling.", e);
    }
    // register this read end
    readEnds.add(readEnd);
    return readEnd;
}
Also used : ArrayList(java.util.ArrayList) MemorySegment(org.apache.flink.core.memory.MemorySegment)
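Stripped of the ReadEnd bookkeeping, switchBuffers relies on the IOManager block-channel API: a BlockChannelWriter persists full segments, and a BlockChannelReader hands them back through its return queue. The standalone sketch below shows that spill-and-read-back pattern; the 32 KiB segment size, the 12345 payload, the class name, and the use of the single-argument createBlockChannelWriter overload are illustrative choices, not taken from the code above.

import org.apache.flink.core.memory.MemorySegment;
import org.apache.flink.core.memory.MemorySegmentFactory;
import org.apache.flink.runtime.io.disk.iomanager.BlockChannelReader;
import org.apache.flink.runtime.io.disk.iomanager.BlockChannelWriter;
import org.apache.flink.runtime.io.disk.iomanager.FileIOChannel;
import org.apache.flink.runtime.io.disk.iomanager.IOManager;
import org.apache.flink.runtime.io.disk.iomanager.IOManagerAsync;

public class SpillSketch {

    public static void main(String[] args) throws Exception {
        final int segmentSize = 32 * 1024; // illustrative block size

        IOManager ioManager = new IOManagerAsync();
        FileIOChannel.ID channel = ioManager.createChannel();

        // spill one full segment to disk
        MemorySegment seg = MemorySegmentFactory.allocateUnpooledSegment(segmentSize);
        seg.putInt(0, 12345); // payload at the start of the block
        BlockChannelWriter<MemorySegment> writer = ioManager.createBlockChannelWriter(channel);
        writer.writeBlock(seg);
        writer.close(); // same close-then-read sequence as in switchBuffers above

        // read the block back; the filled segment returns through the reader's queue
        BlockChannelReader<MemorySegment> reader = ioManager.createBlockChannelReader(channel);
        reader.readBlock(MemorySegmentFactory.allocateUnpooledSegment(segmentSize));
        MemorySegment readBack = reader.getReturnQueue().take();
        System.out.println(readBack.getInt(0)); // prints 12345

        reader.closeAndDelete(); // close the channel and delete the temporary file
        // the IOManager itself should also be shut down when no longer needed
    }
}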

Example 18 with MemorySegment

use of org.apache.flink.core.memory.MemorySegment in project flink by apache.

the class ChannelReaderInputView method nextSegment.

// --------------------------------------------------------------------------------------------
//                                        Utilities
// --------------------------------------------------------------------------------------------
/**
	 * Gets the next segment from the asynchronous block reader. If more requests are to be issued, the method
	 * first sends a new request with the current memory segment. If no more requests are pending, the method
	 * adds the segment to the reader's return queue, which thereby effectively collects all memory segments.
	 * The method then fetches the next non-consumed segment
	 * returned by the reader. If no further segments are available, this method throws an {@link EOFException}.
	 * 
	 * @param current The memory segment used for the next request.
	 * @return The memory segment to read from next.
	 * 
	 * @throws EOFException Thrown, if no further segments are available.
	 * @throws IOException Thrown, if an I/O error occurred while reading.
	 * @see AbstractPagedInputView#nextSegment(org.apache.flink.core.memory.MemorySegment)
	 */
@Override
protected MemorySegment nextSegment(MemorySegment current) throws IOException {
    // check if we are at our end
    if (this.inLastBlock) {
        throw new EOFException();
    }
    // send a read request with the current segment first; if only a single segment is in rotation,
    // that same segment will come back from the reader in the lines below
    if (current != null) {
        sendReadRequest(current);
    }
    // get the next segment
    final MemorySegment seg = this.reader.getNextReturnedBlock();
    // check the header
    if (seg.getShort(0) != ChannelWriterOutputView.HEADER_MAGIC_NUMBER) {
        throw new IOException("The current block does not belong to a ChannelWriterOutputView / " + "ChannelReaderInputView: Wrong magic number.");
    }
    if ((seg.getShort(ChannelWriterOutputView.HEADER_FLAGS_OFFSET) & ChannelWriterOutputView.FLAG_LAST_BLOCK) != 0) {
        // last block
        this.numRequestsRemaining = 0;
        this.inLastBlock = true;
    }
    return seg;
}
Also used : EOFException(java.io.EOFException) IOException(java.io.IOException) MemorySegment(org.apache.flink.core.memory.MemorySegment)
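Note that consumers never call nextSegment(MemorySegment) directly: it is protected and is invoked by the AbstractPagedInputView read methods once the current segment is exhausted, so application code simply keeps reading records until the EOFException surfaces. A combined write/read sketch that exercises this, together with the close() methods from the next two examples, follows Example 20 below.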

Example 19 with MemorySegment

use of org.apache.flink.core.memory.MemorySegment in project flink by apache.

the class ChannelReaderInputView method close.

/**
	 * Closes this InputView, closing the underlying reader and returning all memory segments.
	 * 
	 * @return A list containing all memory segments originally supplied to this view.
	 * @throws IOException Thrown, if the underlying reader could not be properly closed.
	 */
public List<MemorySegment> close() throws IOException {
    if (this.closed) {
        throw new IllegalStateException("Already closed.");
    }
    this.closed = true;
    // re-collect all memory segments
    ArrayList<MemorySegment> list = this.freeMem;
    final MemorySegment current = getCurrentSegment();
    if (current != null) {
        list.add(current);
    }
    clear();
    // close the reader and gather all segments
    final LinkedBlockingQueue<MemorySegment> queue = this.reader.getReturnQueue();
    this.reader.close();
    while (list.size() < this.numSegments) {
        final MemorySegment m = queue.poll();
        if (m == null) {
            // we get null if the queue is empty. that should not be the case if the reader was properly closed.
            throw new RuntimeException("Bug in ChannelReaderInputView: MemorySegments lost.");
        }
        list.add(m);
    }
    return list;
}
Also used : MemorySegment(org.apache.flink.core.memory.MemorySegment)

Example 20 with MemorySegment

use of org.apache.flink.core.memory.MemorySegment in project flink by apache.

the class ChannelWriterOutputView method close.

// --------------------------------------------------------------------------------------------
/**
	 * Closes this OutputView, closing the underlying writer and returning all memory segments.
	 * 
	 * @return A list containing all memory segments originally supplied to this view.
	 * @throws IOException Thrown, if the underlying writer could not be properly closed.
	 */
public List<MemorySegment> close() throws IOException {
    // send off the last segment and flag it as the last block
    writeSegment(getCurrentSegment(), getCurrentPositionInSegment(), true);
    clear();
    // close the writer and gather all segments
    final LinkedBlockingQueue<MemorySegment> queue = this.writer.getReturnQueue();
    this.writer.close();
    // re-collect all memory segments
    ArrayList<MemorySegment> list = new ArrayList<MemorySegment>(this.numSegments);
    for (int i = 0; i < this.numSegments; i++) {
        final MemorySegment m = queue.poll();
        if (m == null) {
            // we get null if the queue is empty. that should not be the case if the writer was properly closed.
            throw new RuntimeException("ChannelWriterOutputView: MemorySegments have been taken from return queue by different actor.");
        }
        list.add(m);
    }
    return list;
}
Also used : ArrayList(java.util.ArrayList) MemorySegment(org.apache.flink.core.memory.MemorySegment)
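Examples 18 through 20 fit together as one write/read cycle: a ChannelWriterOutputView paginates records into header-prefixed blocks, its close() flushes the last block and hands the memory back, and a ChannelReaderInputView consumes the same channel until the block flagged as the last one raises an EOFException, after which its close() returns the segments once more. The following self-contained sketch of that cycle is an illustration, not code from the Flink repository; the 32 KiB segment size, the two-segment memory pool, the single int record, and the class name are assumptions.

import java.io.EOFException;
import java.util.ArrayList;
import java.util.List;

import org.apache.flink.core.memory.MemorySegment;
import org.apache.flink.core.memory.MemorySegmentFactory;
import org.apache.flink.runtime.io.disk.iomanager.BlockChannelReader;
import org.apache.flink.runtime.io.disk.iomanager.BlockChannelWriter;
import org.apache.flink.runtime.io.disk.iomanager.ChannelReaderInputView;
import org.apache.flink.runtime.io.disk.iomanager.ChannelWriterOutputView;
import org.apache.flink.runtime.io.disk.iomanager.FileIOChannel;
import org.apache.flink.runtime.io.disk.iomanager.IOManager;
import org.apache.flink.runtime.io.disk.iomanager.IOManagerAsync;

public class WriteReadCycleSketch {

    public static void main(String[] args) throws Exception {
        final int segmentSize = 32 * 1024; // illustrative block size

        IOManager ioManager = new IOManagerAsync();
        FileIOChannel.ID channel = ioManager.createChannel();

        // write side: paginate records into blocks on the channel
        List<MemorySegment> writeMemory = new ArrayList<MemorySegment>();
        writeMemory.add(MemorySegmentFactory.allocateUnpooledSegment(segmentSize));
        writeMemory.add(MemorySegmentFactory.allocateUnpooledSegment(segmentSize));

        BlockChannelWriter<MemorySegment> blockWriter = ioManager.createBlockChannelWriter(channel);
        ChannelWriterOutputView out = new ChannelWriterOutputView(blockWriter, writeMemory, segmentSize);
        out.writeInt(42);
        List<MemorySegment> reclaimed = out.close(); // flushes the last block, returns the segments
        int numBlocks = out.getBlockCount();         // includes the final (partial) block

        // read side: consume the channel until the last block is exhausted
        BlockChannelReader<MemorySegment> blockReader = ioManager.createBlockChannelReader(channel);
        ChannelReaderInputView in = new ChannelReaderInputView(blockReader, reclaimed, numBlocks, false);
        try {
            while (true) {
                System.out.println(in.readInt()); // prints 42, then the data runs out
            }
        } catch (EOFException eof) {
            // nextSegment() signalled that the block flagged as last has been fully consumed
        }
        List<MemorySegment> segments = in.close(); // all segments handed back for reuse
        System.out.println("segments returned: " + segments.size());
        // the temporary channel file and the IOManager should be cleaned up when no longer needed
    }
}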

Aggregations

MemorySegment (org.apache.flink.core.memory.MemorySegment): 161
Test (org.junit.Test): 86
DummyInvokable (org.apache.flink.runtime.operators.testutils.DummyInvokable): 38
ArrayList (java.util.ArrayList): 30
Tuple2 (org.apache.flink.api.java.tuple.Tuple2): 24
IntPair (org.apache.flink.runtime.operators.testutils.types.IntPair): 24
MemoryAllocationException (org.apache.flink.runtime.memory.MemoryAllocationException): 22
IOException (java.io.IOException): 19
TestData (org.apache.flink.runtime.operators.testutils.TestData): 18
FileIOChannel (org.apache.flink.runtime.io.disk.iomanager.FileIOChannel): 17
UniformIntPairGenerator (org.apache.flink.runtime.operators.testutils.UniformIntPairGenerator): 16
IOManager (org.apache.flink.runtime.io.disk.iomanager.IOManager): 15
IOManagerAsync (org.apache.flink.runtime.io.disk.iomanager.IOManagerAsync): 15
EOFException (java.io.EOFException): 14
AbstractInvokable (org.apache.flink.runtime.jobgraph.tasks.AbstractInvokable): 14
Random (java.util.Random): 11
ChannelReaderInputView (org.apache.flink.runtime.io.disk.iomanager.ChannelReaderInputView): 10
UniformRecordGenerator (org.apache.flink.runtime.operators.testutils.UniformRecordGenerator): 9
Record (org.apache.flink.types.Record): 9
MutableObjectIterator (org.apache.flink.util.MutableObjectIterator): 9