Example 36 with Buffer

Use of org.apache.flink.runtime.io.network.buffer.Buffer in project flink by apache.

The class PipelinedSubpartition, method finish().

@Override
public void finish() throws IOException {
    final Buffer buffer = EventSerializer.toBuffer(EndOfPartitionEvent.INSTANCE);
    // view reference accessible outside the lock, but assigned inside the locked scope
    final PipelinedSubpartitionView reader;
    synchronized (buffers) {
        if (isFinished || isReleased) {
            return;
        }
        buffers.add(buffer);
        reader = readView;
        updateStatistics(buffer);
        isFinished = true;
    }
    LOG.debug("Finished {}.", this);
    // Notify the listener outside of the synchronized block
    if (reader != null) {
        reader.notifyBuffersAvailable(1);
    }
}
Also used : Buffer(org.apache.flink.runtime.io.network.buffer.Buffer)
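
finish() captures the read view while holding the buffers lock, but invokes the notification after leaving it, so the callback cannot dead-lock or contend against that lock. Below is a minimal, self-contained sketch of the same pattern; the queue, listener interface, and class names are hypothetical stand-ins, not Flink's classes.

import java.util.ArrayDeque;
import java.util.Queue;

class NotifyOutsideLockExample {

    interface AvailabilityListener {
        void notifyAvailable(int numItems);
    }

    private final Queue<String> items = new ArrayDeque<>();
    private AvailabilityListener listener;
    private boolean finished;

    void setListener(AvailabilityListener listener) {
        synchronized (items) {
            this.listener = listener;
        }
    }

    void finish(String endMarker) {
        final AvailabilityListener reader;
        synchronized (items) {
            if (finished) {
                return;
            }
            items.add(endMarker);   // enqueue the final element under the lock
            reader = listener;      // capture the listener reference under the lock
            finished = true;
        }
        // notify outside of the synchronized block, so a callback that re-enters
        // this object or blocks cannot dead-lock against the items lock
        if (reader != null) {
            reader.notifyAvailable(1);
        }
    }
}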

Example 37 with Buffer

Use of org.apache.flink.runtime.io.network.buffer.Buffer in project flink by apache.

The class SpilledSubpartitionView, method getNextBuffer().

@Override
public Buffer getNextBuffer() throws IOException, InterruptedException {
    if (fileReader.hasReachedEndOfFile() || isSpillInProgress) {
        return null;
    }
    // TODO This is fragile as we implicitly expect that multiple calls to
    // this method don't happen before recycling buffers returned earlier.
    Buffer buffer = bufferPool.requestBufferBlocking();
    fileReader.readInto(buffer);
    return buffer;
}
Also used : Buffer(org.apache.flink.runtime.io.network.buffer.Buffer)
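
The TODO above points at an implicit contract: the caller is expected to recycle each returned buffer before the next call, otherwise requestBufferBlocking() may block waiting for a free buffer. A hedged sketch of such a consumer loop follows; PooledBuffer and BufferSource are made-up stand-ins, not Flink's Buffer or ResultSubpartitionView API.

import java.io.IOException;

class SpilledReadLoopExample {

    /** Hypothetical stand-in for a pooled, recyclable buffer. */
    interface PooledBuffer {
        void recycle();
    }

    /** Hypothetical stand-in for a view that serves one buffer per call, or null at the end. */
    interface BufferSource {
        PooledBuffer getNextBuffer() throws IOException, InterruptedException;
    }

    static void drain(BufferSource source) throws IOException, InterruptedException {
        PooledBuffer buffer;
        while ((buffer = source.getNextBuffer()) != null) {
            try {
                // process the buffer here
            } finally {
                // recycle before the next request; a bounded pool behind the
                // blocking request would otherwise stall the reader
                buffer.recycle();
            }
        }
    }
}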

Example 38 with Buffer

Use of org.apache.flink.runtime.io.network.buffer.Buffer in project flink by apache.

The class LocalInputChannel, method getNextBuffer().

@Override
BufferAndAvailability getNextBuffer() throws IOException, InterruptedException {
    checkError();
    ResultSubpartitionView subpartitionView = this.subpartitionView;
    if (subpartitionView == null) {
        // this can happen if the request for the partition was triggered asynchronously
        // by the time trigger
        // would be good to avoid that, by guaranteeing that the requestPartition() and
        // getNextBuffer() always come from the same thread
        // we could do that by letting the timer insert a special "requesting channel" into the input gate's queue
        subpartitionView = checkAndWaitForSubpartitionView();
    }
    Buffer next = subpartitionView.getNextBuffer();
    if (next == null) {
        if (subpartitionView.isReleased()) {
            throw new CancelTaskException("Consumed partition " + subpartitionView + " has been released.");
        } else {
            // no buffer although the view is not released: the buffer availability
            // notifications and the actual data are out of sync
            throw new IllegalStateException("Consumed partition has no buffers available. " + "Number of received buffer notifications is " + numBuffersAvailable + ".");
        }
    }
    long remaining = numBuffersAvailable.decrementAndGet();
    if (remaining >= 0) {
        numBytesIn.inc(next.getSize());
        return new BufferAndAvailability(next, remaining > 0);
    } else if (subpartitionView.isReleased()) {
        throw new ProducerFailedException(subpartitionView.getFailureCause());
    } else {
        throw new IllegalStateException("No buffer available and producer partition not released.");
    }
}
Also used : Buffer(org.apache.flink.runtime.io.network.buffer.Buffer) ResultSubpartitionView(org.apache.flink.runtime.io.network.partition.ResultSubpartitionView) CancelTaskException(org.apache.flink.runtime.execution.CancelTaskException) ProducerFailedException(org.apache.flink.runtime.io.network.partition.ProducerFailedException)
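
getNextBuffer() pairs every returned buffer with a flag saying whether more buffers are known to be queued, derived from an AtomicLong that counts buffer notifications. The following self-contained sketch shows only that bookkeeping; the class and method names are hypothetical and merely illustrate the decrementAndGet() pattern used above.

import java.util.concurrent.atomic.AtomicLong;

class AvailabilityCounterExample {

    private final AtomicLong numBuffersAvailable = new AtomicLong();

    // producer side: called once per buffer that becomes available
    void onBufferAdded() {
        numBuffersAvailable.incrementAndGet();
    }

    // consumer side: consume one notification and report whether more buffers are queued
    boolean takeOneAndCheckMoreAvailable() {
        long remaining = numBuffersAvailable.decrementAndGet();
        if (remaining < 0) {
            // more buffers consumed than announced: notifications and data are out of sync
            throw new IllegalStateException("No buffer was announced as available.");
        }
        return remaining > 0;
    }
}

In the Flink method above, the same remaining count feeds the moreAvailable flag of the returned BufferAndAvailability.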

Example 39 with Buffer

Use of org.apache.flink.runtime.io.network.buffer.Buffer in project flink by apache.

The class AbstractRecordReader, method getNextRecord().

protected boolean getNextRecord(T target) throws IOException, InterruptedException {
    if (isFinished) {
        return false;
    }
    while (true) {
        if (currentRecordDeserializer != null) {
            DeserializationResult result = currentRecordDeserializer.getNextRecord(target);
            if (result.isBufferConsumed()) {
                final Buffer currentBuffer = currentRecordDeserializer.getCurrentBuffer();
                currentBuffer.recycle();
                currentRecordDeserializer = null;
            }
            if (result.isFullRecord()) {
                return true;
            }
        }
        final BufferOrEvent bufferOrEvent = inputGate.getNextBufferOrEvent();
        if (bufferOrEvent.isBuffer()) {
            currentRecordDeserializer = recordDeserializers[bufferOrEvent.getChannelIndex()];
            currentRecordDeserializer.setNextBuffer(bufferOrEvent.getBuffer());
        } else {
            // sanity check for leftover data in deserializers. events should only come between
            // records, not in the middle of a fragment
            if (recordDeserializers[bufferOrEvent.getChannelIndex()].hasUnfinishedData()) {
                throw new IOException("Received an event in channel " + bufferOrEvent.getChannelIndex() + " while still having " + "data from a record. This indicates broken serialization logic. " + "If you are using custom serialization code (Writable or Value types), check their " + "serialization routines. In the case of Kryo, check the respective Kryo serializer.");
            }
            if (handleEvent(bufferOrEvent.getEvent())) {
                if (inputGate.isFinished()) {
                    isFinished = true;
                    return false;
                } else if (hasReachedEndOfSuperstep()) {
                    return false;
                }
            // else: More data is coming...
            }
        }
    }
}
Also used : Buffer(org.apache.flink.runtime.io.network.buffer.Buffer) DeserializationResult(org.apache.flink.runtime.io.network.api.serialization.RecordDeserializer.DeserializationResult) IOException(java.io.IOException) BufferOrEvent(org.apache.flink.runtime.io.network.partition.consumer.BufferOrEvent)
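
getNextRecord() interleaves two kinds of progress: it first drains the current per-channel deserializer, and only once that buffer is consumed does it pull the next buffer or event from the input gate, because a record may span several buffers. Below is a simplified, self-contained sketch of that loop; the Deserializer interface and byte[] buffers are hypothetical stand-ins for Flink's RecordDeserializer and Buffer.

import java.util.Iterator;

class RecordReadLoopExample {

    /** Hypothetical stand-in for a record deserializer that is fed one buffer at a time. */
    interface Deserializer {
        /** Tries to produce the next record; returns null if more buffer data is needed. */
        String nextRecord();
        /** True once the buffer set last has been fully consumed. */
        boolean bufferConsumed();
        void setBuffer(byte[] buffer);
    }

    private final Iterator<byte[]> buffers;   // stand-in for the input gate
    private final Deserializer deserializer;
    private boolean hasCurrentBuffer;

    RecordReadLoopExample(Iterator<byte[]> buffers, Deserializer deserializer) {
        this.buffers = buffers;
        this.deserializer = deserializer;
    }

    String getNextRecord() {
        while (true) {
            if (hasCurrentBuffer) {
                String record = deserializer.nextRecord();
                if (deserializer.bufferConsumed()) {
                    hasCurrentBuffer = false;     // buffer drained; fetch a new one next
                }
                if (record != null) {
                    return record;                // a record may span several buffers
                }
            }
            if (!buffers.hasNext()) {
                return null;                      // end of input
            }
            deserializer.setBuffer(buffers.next());
            hasCurrentBuffer = true;
        }
    }
}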

Example 40 with Buffer

Use of org.apache.flink.runtime.io.network.buffer.Buffer in project flink by apache.

The class AdaptiveSpanningRecordDeserializer, method getCurrentBuffer().

@Override
public Buffer getCurrentBuffer() {
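    // hand the buffer to the caller exactly once and drop the local reference,
    // so the caller's recycle() (see AbstractRecordReader above) is the only release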
    Buffer tmp = currentBuffer;
    currentBuffer = null;
    return tmp;
}
Also used : Buffer(org.apache.flink.runtime.io.network.buffer.Buffer) ByteBuffer(java.nio.ByteBuffer)

Aggregations

Buffer (org.apache.flink.runtime.io.network.buffer.Buffer) 66
Test (org.junit.Test) 26
BufferProvider (org.apache.flink.runtime.io.network.buffer.BufferProvider) 10
MemorySegment (org.apache.flink.core.memory.MemorySegment) 9
InvocationOnMock (org.mockito.invocation.InvocationOnMock) 9
ByteBuffer (java.nio.ByteBuffer) 8
JobID (org.apache.flink.api.common.JobID) 8
BufferRecycler (org.apache.flink.runtime.io.network.buffer.BufferRecycler) 8
IntermediateResultPartitionID (org.apache.flink.runtime.jobgraph.IntermediateResultPartitionID) 8
TaskActions (org.apache.flink.runtime.taskmanager.TaskActions) 8
BufferOrEvent (org.apache.flink.runtime.io.network.partition.consumer.BufferOrEvent) 7
SerializationTestType (org.apache.flink.runtime.io.network.api.serialization.types.SerializationTestType) 6
TestInfiniteBufferProvider (org.apache.flink.runtime.io.network.util.TestInfiniteBufferProvider) 6
IOException (java.io.IOException) 5
PrepareForTest (org.powermock.core.classloader.annotations.PrepareForTest) 5
Random (java.util.Random) 4
CountDownLatch (java.util.concurrent.CountDownLatch) 4
AbstractEvent (org.apache.flink.runtime.event.AbstractEvent) 4
IntermediateDataSetID (org.apache.flink.runtime.jobgraph.IntermediateDataSetID) 4
UnregisteredTaskMetricsGroup (org.apache.flink.runtime.operators.testutils.UnregisteredTaskMetricsGroup) 4