Use of org.apache.flink.runtime.io.network.buffer.Buffer in project flink by apache.
The class PipelinedSubpartition, method finish().
@Override
public void finish() throws IOException {
    final Buffer buffer = EventSerializer.toBuffer(EndOfPartitionEvent.INSTANCE);

    // view reference accessible outside the lock, but assigned inside the locked scope
    final PipelinedSubpartitionView reader;

    synchronized (buffers) {
        if (isFinished || isReleased) {
            return;
        }

        buffers.add(buffer);
        reader = readView;
        updateStatistics(buffer);

        isFinished = true;
    }

    LOG.debug("Finished {}.", this);

    // Notify the listener outside of the synchronized block
    if (reader != null) {
        reader.notifyBuffersAvailable(1);
    }
}
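The notable detail here is the locking discipline: the reader view reference is captured while holding the lock on buffers, but the notification is sent only after the lock has been released, so listener code never runs under the subpartition lock. Below is a minimal, generic sketch of that same pattern; the NotifyingQueue class and its listener field are illustrative placeholders, not Flink API.

import java.util.ArrayDeque;
import java.util.Queue;

// Minimal sketch (not Flink code): capture the listener under the lock,
// invoke it only after the lock has been released.
class NotifyingQueue<T> {

    private final Queue<T> buffers = new ArrayDeque<>();
    private volatile Runnable listener; // assumed to be registered elsewhere

    void add(T element) {
        final Runnable reader;
        synchronized (buffers) {
            buffers.add(element);
            reader = listener;
        }

        // Notify outside of the synchronized block, mirroring finish() above.
        if (reader != null) {
            reader.run();
        }
    }
}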
Use of org.apache.flink.runtime.io.network.buffer.Buffer in project flink by apache.
The class SpilledSubpartitionView, method getNextBuffer().
@Override
public Buffer getNextBuffer() throws IOException, InterruptedException {
    if (fileReader.hasReachedEndOfFile() || isSpillInProgress) {
        return null;
    }

    // TODO This is fragile as we implicitly expect that multiple calls to
    // this method don't happen before recycling buffers returned earlier.
    Buffer buffer = bufferPool.requestBufferBlocking();
    fileReader.readInto(buffer);

    return buffer;
}
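The TODO above points at the implicit contract: each buffer obtained from the pool must be recycled before the next call, otherwise the blocking request can starve the pool. A hedged sketch of a caller that respects this contract is shown below; the drain(...) helper is hypothetical, while getNextBuffer() and Buffer.recycle() appear in the snippets on this page.

// Illustrative consumer (assumed caller code, not taken from Flink): drain the
// view, recycling each buffer before requesting the next one.
static void drain(SpilledSubpartitionView view) throws IOException, InterruptedException {
    Buffer next;
    while ((next = view.getNextBuffer()) != null) {
        try {
            // ... hypothetical processing of the buffer's contents ...
        } finally {
            next.recycle(); // return the buffer to the pool before the next request
        }
    }
}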
Use of org.apache.flink.runtime.io.network.buffer.Buffer in project flink by apache.
The class AdaptiveSpanningRecordDeserializer, method getCurrentBuffer().
@Override
public Buffer getCurrentBuffer() {
    Buffer tmp = currentBuffer;
    currentBuffer = null;
    return tmp;
}
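getCurrentBuffer() is a hand-off: it returns the current buffer and nulls the field, so ownership (and the duty to recycle) passes to the caller exactly once. A minimal, generic sketch of that idiom, with illustrative names only:

// Generic take-and-clear hand-off (illustrative, not Flink API).
class Holder<T> {

    private T current;

    void set(T value) {
        current = value;
    }

    T take() {
        T tmp = current;
        current = null; // a second call returns null instead of the same reference
        return tmp;
    }
}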
Use of org.apache.flink.runtime.io.network.buffer.Buffer in project flink by apache.
The class RecordWriterTest, method testClearBuffersAfterInterruptDuringBlockingBufferRequest().
// ---------------------------------------------------------------------------------------------
// Resource release tests
// ---------------------------------------------------------------------------------------------

/**
 * Tests a fix for FLINK-2089.
 *
 * @see <a href="https://issues.apache.org/jira/browse/FLINK-2089">FLINK-2089</a>
 */
@Test
public void testClearBuffersAfterInterruptDuringBlockingBufferRequest() throws Exception {
    ExecutorService executor = null;

    try {
        executor = Executors.newSingleThreadExecutor();

        final CountDownLatch sync = new CountDownLatch(2);

        final Buffer buffer = spy(TestBufferFactory.createBuffer(4));

        // Return buffer for first request, but block for all following requests.
        Answer<Buffer> request = new Answer<Buffer>() {
            @Override
            public Buffer answer(InvocationOnMock invocation) throws Throwable {
                sync.countDown();

                if (sync.getCount() == 1) {
                    return buffer;
                }

                final Object o = new Object();
                synchronized (o) {
                    while (true) {
                        o.wait();
                    }
                }
            }
        };

        BufferProvider bufferProvider = mock(BufferProvider.class);
        when(bufferProvider.requestBufferBlocking()).thenAnswer(request);

        ResultPartitionWriter partitionWriter = createResultPartitionWriter(bufferProvider);

        final RecordWriter<IntValue> recordWriter = new RecordWriter<IntValue>(partitionWriter);

        Future<?> result = executor.submit(new Callable<Void>() {
            @Override
            public Void call() throws Exception {
                IntValue val = new IntValue(0);

                try {
                    recordWriter.emit(val);
                    recordWriter.flush();

                    recordWriter.emit(val);
                } catch (InterruptedException e) {
                    recordWriter.clearBuffers();
                }

                return null;
            }
        });

        sync.await();

        // Interrupt the Thread.
        //
        // The second emit call requests a new buffer and blocks the thread.
        // When interrupting the thread at this point, clearing the buffers
        // should not recycle any buffer.
        result.cancel(true);

        recordWriter.clearBuffers();
        // Verify that buffers have been requested, but only one has been written out.
        verify(bufferProvider, times(2)).requestBufferBlocking();
        verify(partitionWriter, times(1)).writeBuffer(any(Buffer.class), anyInt());

        // Verify that the written out buffer has only been recycled once
        // (by the partition writer).
        assertTrue("Buffer not recycled.", buffer.isRecycled());
        verify(buffer, times(1)).recycle();
    } finally {
        if (executor != null) {
            executor.shutdown();
        }
    }
}
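The task-side pattern this test exercises can be summarized as: if the thread is interrupted while blocked in a buffer request, clear the writer's buffers so nothing is leaked. A hedged sketch of that handling, assuming a RecordWriter<IntValue> named recordWriter as above; only emit(), flush() and clearBuffers() from the snippet are used.

try {
    recordWriter.emit(new IntValue(42));
    recordWriter.flush();
} catch (InterruptedException e) {
    // Release any partially written buffers so they can go back to the pool.
    recordWriter.clearBuffers();
    Thread.currentThread().interrupt(); // restore the interrupt status
}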
Use of org.apache.flink.runtime.io.network.buffer.Buffer in project flink by apache.
The class ResultPartitionWriterTest, method testWriteBufferToAllChannelsReferenceCounting().
// ---------------------------------------------------------------------------------------------
// Resource release tests
// ---------------------------------------------------------------------------------------------

/**
 * Tests that event buffers are properly recycled when broadcasting events
 * to multiple channels.
 *
 * @throws Exception
 */
@Test
public void testWriteBufferToAllChannelsReferenceCounting() throws Exception {
    Buffer buffer = EventSerializer.toBuffer(EndOfPartitionEvent.INSTANCE);

    ResultPartition partition = new ResultPartition(
        "TestTask",
        mock(TaskActions.class),
        new JobID(),
        new ResultPartitionID(),
        ResultPartitionType.PIPELINED,
        2,
        2,
        mock(ResultPartitionManager.class),
        mock(ResultPartitionConsumableNotifier.class),
        mock(IOManager.class),
        false);

    ResultPartitionWriter partitionWriter = new ResultPartitionWriter(partition);

    partitionWriter.writeBufferToAllChannels(buffer);

    // Verify added to all queues, i.e. two buffers in total
    assertEquals(2, partition.getTotalNumberOfBuffers());

    // release the buffers in the partition
    partition.release();

    assertTrue(buffer.isRecycled());
}
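The final assertion relies on reference counting: the same event buffer is enqueued into both subpartitions and only counts as recycled once the last holder releases it. Below is a minimal, generic sketch of that counting scheme; it is illustrative only and not Flink's Buffer implementation.

import java.util.concurrent.atomic.AtomicInteger;

// Generic reference-counted buffer: recycled only after the last release.
class RefCountedBuffer {

    private final AtomicInteger refCount;
    private volatile boolean recycled;

    RefCountedBuffer(int consumers) {
        this.refCount = new AtomicInteger(consumers);
    }

    void release() {
        if (refCount.decrementAndGet() == 0) {
            recycled = true; // the backing memory would be returned to its pool here
        }
    }

    boolean isRecycled() {
        return recycled;
    }
}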