Example 11 with BufferProvider

use of org.apache.flink.runtime.io.network.buffer.BufferProvider in project flink by apache.

the class StreamRecordWriterTest method getMockWriter.

private static ResultPartitionWriter getMockWriter(int numPartitions) throws Exception {
    BufferProvider mockProvider = mock(BufferProvider.class);
    when(mockProvider.requestBufferBlocking()).thenAnswer(new Answer<Buffer>() {

        @Override
        public Buffer answer(InvocationOnMock invocation) {
            return new Buffer(MemorySegmentFactory.allocateUnpooledSegment(4096), FreeingBufferRecycler.INSTANCE);
        }
    });
    ResultPartitionWriter mockWriter = mock(ResultPartitionWriter.class);
    when(mockWriter.getBufferProvider()).thenReturn(mockProvider);
    when(mockWriter.getNumberOfOutputChannels()).thenReturn(numPartitions);
    return mockWriter;
}
Also used : Buffer(org.apache.flink.runtime.io.network.buffer.Buffer) InvocationOnMock(org.mockito.invocation.InvocationOnMock) ResultPartitionWriter(org.apache.flink.runtime.io.network.api.writer.ResultPartitionWriter) BufferProvider(org.apache.flink.runtime.io.network.buffer.BufferProvider)
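
A minimal usage sketch of the mock above (hypothetical test body, not part of StreamRecordWriterTest; it only uses calls that already appear in these examples). Every blocking request yields a fresh 4096-byte unpooled buffer, and the writer reports the requested channel count:

ResultPartitionWriter writer = getMockWriter(4);
Buffer buffer = writer.getBufferProvider().requestBufferBlocking();
// Each request allocates a new unpooled 4 KB segment, recycled by FreeingBufferRecycler.INSTANCE.
assertEquals(4096, buffer.getMemorySegment().size());
assertEquals(4, writer.getNumberOfOutputChannels());
// Recycling frees the unpooled segment.
buffer.recycle();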

Example 12 with BufferProvider

use of org.apache.flink.runtime.io.network.buffer.BufferProvider in project flink by apache.

the class StreamMockEnvironment method addOutput.

public <T> void addOutput(final Queue<Object> outputList, final TypeSerializer<T> serializer) {
    try {
        // The record-oriented writers wrap the buffer writer. We mock it
        // to collect the returned buffers and deserialize the content to
        // the output list
        BufferProvider mockBufferProvider = mock(BufferProvider.class);
        when(mockBufferProvider.requestBufferBlocking()).thenAnswer(new Answer<Buffer>() {

            @Override
            public Buffer answer(InvocationOnMock invocationOnMock) throws Throwable {
                return new Buffer(MemorySegmentFactory.allocateUnpooledSegment(bufferSize), mock(BufferRecycler.class));
            }
        });
        ResultPartitionWriter mockWriter = mock(ResultPartitionWriter.class);
        when(mockWriter.getNumberOfOutputChannels()).thenReturn(1);
        when(mockWriter.getBufferProvider()).thenReturn(mockBufferProvider);
        final RecordDeserializer<DeserializationDelegate<T>> recordDeserializer = new AdaptiveSpanningRecordDeserializer<DeserializationDelegate<T>>();
        final NonReusingDeserializationDelegate<T> delegate = new NonReusingDeserializationDelegate<T>(serializer);
        // Add records and events from the buffer to the output list
        doAnswer(new Answer<Void>() {

            @Override
            public Void answer(InvocationOnMock invocationOnMock) throws Throwable {
                Buffer buffer = (Buffer) invocationOnMock.getArguments()[0];
                addBufferToOutputList(recordDeserializer, delegate, buffer, outputList);
                return null;
            }
        }).when(mockWriter).writeBuffer(any(Buffer.class), anyInt());
        doAnswer(new Answer<Void>() {

            @Override
            public Void answer(InvocationOnMock invocationOnMock) throws Throwable {
                Buffer buffer = (Buffer) invocationOnMock.getArguments()[0];
                addBufferToOutputList(recordDeserializer, delegate, buffer, outputList);
                return null;
            }
        }).when(mockWriter).writeBufferToAllChannels(any(Buffer.class));
        outputs.add(mockWriter);
    } catch (Throwable t) {
        t.printStackTrace();
        fail(t.getMessage());
    }
}
Also used : Buffer(org.apache.flink.runtime.io.network.buffer.Buffer) AdaptiveSpanningRecordDeserializer(org.apache.flink.runtime.io.network.api.serialization.AdaptiveSpanningRecordDeserializer) ResultPartitionWriter(org.apache.flink.runtime.io.network.api.writer.ResultPartitionWriter) InvocationOnMock(org.mockito.invocation.InvocationOnMock) NonReusingDeserializationDelegate(org.apache.flink.runtime.plugable.NonReusingDeserializationDelegate) BufferProvider(org.apache.flink.runtime.io.network.buffer.BufferProvider) DeserializationDelegate(org.apache.flink.runtime.plugable.DeserializationDelegate)
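
A hedged sketch of the round trip this wiring enables (hypothetical code, assumed to sit at the end of addOutput where mockWriter and outputList are still in scope): any buffer handed to the mock writer is routed through addBufferToOutputList, so its deserialized records end up in outputList.

Buffer buffer = mockWriter.getBufferProvider().requestBufferBlocking();
// ... serialize one record of type T into the buffer's memory segment ...
// Writing triggers the first doAnswer hook, which deserializes the buffer into outputList.
mockWriter.writeBuffer(buffer, 0);
// Broadcasting goes through the second hook and feeds the same output list.
mockWriter.writeBufferToAllChannels(buffer);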

Example 13 with BufferProvider

use of org.apache.flink.runtime.io.network.buffer.BufferProvider in project flink by apache.

the class PipelinedSubpartitionTest method testProduceConsume.

private void testProduceConsume(boolean isSlowProducer, boolean isSlowConsumer) throws Exception {
    // Config
    final int producerBufferPoolSize = 8;
    final int producerNumberOfBuffersToProduce = 128;
    // Producer behaviour
    final TestProducerSource producerSource = new TestProducerSource() {

        private BufferProvider bufferProvider = new TestPooledBufferProvider(producerBufferPoolSize);

        private int numberOfBuffers;

        @Override
        public BufferOrEvent getNextBufferOrEvent() throws Exception {
            if (numberOfBuffers == producerNumberOfBuffersToProduce) {
                return null;
            }
            final Buffer buffer = bufferProvider.requestBufferBlocking();
            final MemorySegment segment = buffer.getMemorySegment();
            int next = numberOfBuffers * (segment.size() / 4);
            for (int i = 0; i < segment.size(); i += 4) {
                segment.putInt(i, next);
                next++;
            }
            numberOfBuffers++;
            return new BufferOrEvent(buffer, 0);
        }
    };
    // Consumer behaviour
    final TestConsumerCallback consumerCallback = new TestConsumerCallback() {

        private int numberOfBuffers;

        @Override
        public void onBuffer(Buffer buffer) {
            final MemorySegment segment = buffer.getMemorySegment();
            int expected = numberOfBuffers * (segment.size() / 4);
            for (int i = 0; i < segment.size(); i += 4) {
                assertEquals(expected, segment.getInt(i));
                expected++;
            }
            numberOfBuffers++;
            buffer.recycle();
        }

        @Override
        public void onEvent(AbstractEvent event) {
        // Nothing to do in this test
        }
    };
    final PipelinedSubpartition subpartition = createSubpartition();
    TestSubpartitionConsumer consumer = new TestSubpartitionConsumer(isSlowConsumer, consumerCallback);
    final PipelinedSubpartitionView view = subpartition.createReadView(null, consumer);
    consumer.setSubpartitionView(view);
    Future<Boolean> producerResult = executorService.submit(new TestSubpartitionProducer(subpartition, isSlowProducer, producerSource));
    Future<Boolean> consumerResult = executorService.submit(consumer);
    // Wait for producer and consumer to finish
    producerResult.get();
    consumerResult.get();
}
Also used : TestBufferFactory.createBuffer(org.apache.flink.runtime.io.network.util.TestBufferFactory.createBuffer) Buffer(org.apache.flink.runtime.io.network.buffer.Buffer) TestPooledBufferProvider(org.apache.flink.runtime.io.network.util.TestPooledBufferProvider) TestConsumerCallback(org.apache.flink.runtime.io.network.util.TestConsumerCallback) TestProducerSource(org.apache.flink.runtime.io.network.util.TestProducerSource) AbstractEvent(org.apache.flink.runtime.event.AbstractEvent) MemorySegment(org.apache.flink.core.memory.MemorySegment) TestSubpartitionProducer(org.apache.flink.runtime.io.network.util.TestSubpartitionProducer) BufferOrEvent(org.apache.flink.runtime.io.network.partition.consumer.BufferOrEvent) TestSubpartitionConsumer(org.apache.flink.runtime.io.network.util.TestSubpartitionConsumer) BufferProvider(org.apache.flink.runtime.io.network.buffer.BufferProvider)
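
The helper is parameterized over producer and consumer speed; a hedged sketch of how it would typically be driven (the actual @Test methods of PipelinedSubpartitionTest are not part of this excerpt, so the method names below are made up):

@Test
public void testSlowConsumer() throws Exception {
    // Hypothetical invocation: fast producer, deliberately throttled consumer.
    testProduceConsume(false, true);
}

@Test
public void testSlowProducer() throws Exception {
    // Hypothetical invocation: throttled producer, fast consumer.
    testProduceConsume(true, false);
}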

Example 14 with BufferProvider

use of org.apache.flink.runtime.io.network.buffer.BufferProvider in project flink by apache.

the class SpilledSubpartitionViewTest method testReadMultipleFilesWithSingleBufferPool.

@Test
public void testReadMultipleFilesWithSingleBufferPool() throws Exception {
    ExecutorService executor = null;
    BufferFileWriter[] writers = null;
    ResultSubpartitionView[] readers = null;
    try {
        executor = Executors.newCachedThreadPool();
        // Setup
        writers = new BufferFileWriter[] {
            createWriterAndWriteBuffers(IO_MANAGER, writerBufferPool, 512),
            createWriterAndWriteBuffers(IO_MANAGER, writerBufferPool, 512)
        };
        readers = new ResultSubpartitionView[writers.length];
        TestSubpartitionConsumer[] consumers = new TestSubpartitionConsumer[writers.length];
        BufferProvider inputBuffers = new TestPooledBufferProvider(2);
        ResultSubpartition parent = mock(ResultSubpartition.class);
        // Wait for writers to finish
        for (BufferFileWriter writer : writers) {
            writer.close();
        }
        // Create the views depending on the test configuration
        for (int i = 0; i < readers.length; i++) {
            consumers[i] = new TestSubpartitionConsumer(false, new TestConsumerCallback.RecyclingCallback());
            // +1 for the end-of-partition event
            readers[i] = new SpilledSubpartitionView(parent, inputBuffers.getMemorySegmentSize(), writers[i], 512 + 1, consumers[i]);
            consumers[i].setSubpartitionView(readers[i]);
        }
        final List<Future<Boolean>> results = Lists.newArrayList();
        // Submit the consuming tasks
        for (TestSubpartitionConsumer consumer : consumers) {
            results.add(executor.submit(consumer));
        }
        // Wait for the results
        for (Future<Boolean> res : results) {
            try {
                res.get(2, TimeUnit.MINUTES);
            } catch (TimeoutException e) {
                throw new TimeoutException("There has been a timeout in the test. This " + "indicates that there is a bug/deadlock in the tested subpartition " + "view.");
            }
        }
    } finally {
        if (writers != null) {
            for (BufferFileWriter writer : writers) {
                if (writer != null) {
                    writer.deleteChannel();
                }
            }
        }
        if (readers != null) {
            for (ResultSubpartitionView reader : readers) {
                if (reader != null) {
                    reader.releaseAllResources();
                }
            }
        }
        if (executor != null) {
            executor.shutdown();
        }
    }
}
Also used : TestPooledBufferProvider(org.apache.flink.runtime.io.network.util.TestPooledBufferProvider) BufferFileWriter(org.apache.flink.runtime.io.disk.iomanager.BufferFileWriter) TestSubpartitionConsumer(org.apache.flink.runtime.io.network.util.TestSubpartitionConsumer) ExecutorService(java.util.concurrent.ExecutorService) TestInfiniteBufferProvider(org.apache.flink.runtime.io.network.util.TestInfiniteBufferProvider) BufferProvider(org.apache.flink.runtime.io.network.buffer.BufferProvider) Future(java.util.concurrent.Future) TimeoutException(java.util.concurrent.TimeoutException) Test(org.junit.Test)
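
The point of sharing a single TestPooledBufferProvider(2) is that both readers compete for the same two buffers. A hedged standalone sketch of that pooling behaviour (assumed semantics of the test utility, not taken from the class above):

BufferProvider sharedPool = new TestPooledBufferProvider(2);
Buffer first = sharedPool.requestBufferBlocking();
Buffer second = sharedPool.requestBufferBlocking();
// The pool is now exhausted; a further blocking request would wait here
// until one of the outstanding buffers is recycled.
first.recycle();
Buffer third = sharedPool.requestBufferBlocking();
third.recycle();
second.recycle();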

Example 15 with BufferProvider

use of org.apache.flink.runtime.io.network.buffer.BufferProvider in project flink by apache.

the class PartitionRequestClientHandlerTest method testReleaseInputChannelDuringDecode.

/**
	 * Tests a fix for FLINK-1627.
	 *
	 * <p> FLINK-1627 discovered a race condition, which could lead to an infinite loop when a
	 * receiver was cancelled at a certain point while decoding a message. The test reproduces the
	 * input that led to the infinite loop: the handler obtained a reference to the buffer provider
	 * of the receiving input channel, but the input channel was released (and the corresponding
	 * buffer provider destroyed) without the handler noticing.
	 *
	 * @see <a href="https://issues.apache.org/jira/browse/FLINK-1627">FLINK-1627</a>
	 */
@Test(timeout = 60000)
@SuppressWarnings("unchecked")
public void testReleaseInputChannelDuringDecode() throws Exception {
    // Mocks an input channel in a state as it was released during a decode.
    final BufferProvider bufferProvider = mock(BufferProvider.class);
    when(bufferProvider.requestBuffer()).thenReturn(null);
    when(bufferProvider.isDestroyed()).thenReturn(true);
    when(bufferProvider.addListener(any(EventListener.class))).thenReturn(false);
    final RemoteInputChannel inputChannel = mock(RemoteInputChannel.class);
    when(inputChannel.getInputChannelId()).thenReturn(new InputChannelID());
    when(inputChannel.getBufferProvider()).thenReturn(bufferProvider);
    final BufferResponse receivedBuffer = createBufferResponse(TestBufferFactory.createBuffer(), 0, inputChannel.getInputChannelId());
    final PartitionRequestClientHandler client = new PartitionRequestClientHandler();
    client.addInputChannel(inputChannel);
    client.channelRead(mock(ChannelHandlerContext.class), receivedBuffer);
}
Also used : InputChannelID(org.apache.flink.runtime.io.network.partition.consumer.InputChannelID) BufferProvider(org.apache.flink.runtime.io.network.buffer.BufferProvider) BufferResponse(org.apache.flink.runtime.io.network.netty.NettyMessage.BufferResponse) ChannelHandlerContext(io.netty.channel.ChannelHandlerContext) EventListener(org.apache.flink.runtime.util.event.EventListener) RemoteInputChannel(org.apache.flink.runtime.io.network.partition.consumer.RemoteInputChannel) Test(org.junit.Test)
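
If the fix for FLINK-1627 behaves as the Javadoc describes, the handler must consult the released channel's buffer provider instead of spinning on the failed request. A hedged follow-up check one could append to the test (hypothetical assertion, not part of the original):

// The handler should have asked the destroyed provider about its state
// rather than looping on requestBuffer() forever.
verify(bufferProvider, atLeastOnce()).isDestroyed();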

Aggregations

BufferProvider (org.apache.flink.runtime.io.network.buffer.BufferProvider) 17
Test (org.junit.Test) 11
Buffer (org.apache.flink.runtime.io.network.buffer.Buffer) 9
InvocationOnMock (org.mockito.invocation.InvocationOnMock) 7
InputChannelID (org.apache.flink.runtime.io.network.partition.consumer.InputChannelID) 5
TestInfiniteBufferProvider (org.apache.flink.runtime.io.network.util.TestInfiniteBufferProvider) 5
RemoteInputChannel (org.apache.flink.runtime.io.network.partition.consumer.RemoteInputChannel) 4
ChannelHandlerContext (io.netty.channel.ChannelHandlerContext) 3
EmbeddedChannel (io.netty.channel.embedded.EmbeddedChannel) 3
IOException (java.io.IOException) 3
MemorySegment (org.apache.flink.core.memory.MemorySegment) 3
CancelTaskException (org.apache.flink.runtime.execution.CancelTaskException) 3
ResultPartitionWriter (org.apache.flink.runtime.io.network.api.writer.ResultPartitionWriter) 3
BufferResponse (org.apache.flink.runtime.io.network.netty.NettyMessage.BufferResponse) 3
BufferAvailabilityListener (org.apache.flink.runtime.io.network.partition.BufferAvailabilityListener) 3
PartitionNotFoundException (org.apache.flink.runtime.io.network.partition.PartitionNotFoundException) 3
ResultPartitionID (org.apache.flink.runtime.io.network.partition.ResultPartitionID) 3
PrepareForTest (org.powermock.core.classloader.annotations.PrepareForTest) 3
Queue (java.util.Queue) 2
ExecutorService (java.util.concurrent.ExecutorService) 2