Use of org.apache.flink.runtime.io.disk.iomanager.BufferFileWriter in the Apache Flink project:
the releaseMemory method of the SpillableSubpartitionView class.
/**
 * Spills all in-memory buffers of this subpartition to disk and returns the
 * number of buffers spilled.
 *
 * <p>Returns 0 when the subpartition was already spilled ({@code spilledView != null})
 * or when there is nothing buffered in memory ({@code nextBuffer == null}).
 *
 * @return the number of buffers written to the spill file (0 if nothing was spilled)
 * @throws IOException if creating the spill writer or issuing a write request fails
 */
int releaseMemory() throws IOException {
    synchronized (buffers) {
        if (spilledView != null || nextBuffer == null) {
            // Already spilled or nothing in-memory
            return 0;
        } else {
            // We don't touch next buffer, because a notification has
            // already been sent for it. Only when it is consumed, will
            // it be recycled.
            // Create the spill writer and write all buffers to disk
            BufferFileWriter spillWriter = ioManager.createBufferFileWriter(ioManager.createChannel());
            long spilledBytes = 0;
            // Snapshot the count first: buffers.remove() shrinks the queue as we drain it.
            int numBuffers = buffers.size();
            for (int i = 0; i < numBuffers; i++) {
                Buffer buffer = buffers.remove();
                spilledBytes += buffer.getSize();
                try {
                    // NOTE(review): the buffer is recycled right after the write request
                    // is issued; this assumes writeBlock either completes synchronously
                    // or holds its own reference to the data — TODO confirm against the
                    // BufferFileWriter implementation in use.
                    spillWriter.writeBlock(buffer);
                } finally {
                    buffer.recycle();
                }
            }
            // From here on, reads for this subpartition are served from the spilled file.
            spilledView = new SpilledSubpartitionView(parent, memorySegmentSize, spillWriter, numBuffers, listener);
            LOG.debug("Spilling {} bytes for sub partition {} of {}.", spilledBytes, parent.index, parent.parent.getPartitionId());
            return numBuffers;
        }
    }
}
Use of org.apache.flink.runtime.io.disk.iomanager.BufferFileWriter in the Apache Flink project:
the testWriteConsume method of the SpilledSubpartitionViewTest class.
/**
 * Spills a fixed number of buffers (plus an end-of-partition event) to disk
 * and verifies that a consumer can read them all back through a
 * {@link SpilledSubpartitionView}.
 */
@Test
public void testWriteConsume() throws Exception {
    // Number of data buffers to write before consuming.
    final int bufferCount = 512;

    // Issue all write requests and flush them via close().
    final BufferFileWriter spillWriter = createWriterAndWriteBuffers(IO_MANAGER, writerBufferPool, bufferCount);
    spillWriter.close();

    final TestPooledBufferProvider readBufferPool = new TestPooledBufferProvider(1);
    final TestSubpartitionConsumer subpartitionConsumer =
            new TestSubpartitionConsumer(false, new TestConsumerCallback.RecyclingCallback());

    // The view serves bufferCount data buffers plus one end-of-partition event.
    final SpilledSubpartitionView spilledView = new SpilledSubpartitionView(
            mock(ResultSubpartition.class),
            readBufferPool.getMemorySegmentSize(),
            spillWriter,
            bufferCount + 1,
            subpartitionConsumer);
    subpartitionConsumer.setSubpartitionView(spilledView);

    // Drain the whole subpartition.
    subpartitionConsumer.call();
}
Use of org.apache.flink.runtime.io.disk.iomanager.BufferFileWriter in the Apache Flink project:
the testConsumeWithFewBuffers method of the SpilledSubpartitionViewTest class.
/**
 * Spills buffers to disk and consumes them through a view whose buffer pool
 * may be exhausted, checking that consumption still makes progress.
 */
@Test
public void testConsumeWithFewBuffers() throws Exception {
    // Number of data buffers to write before consuming.
    final int bufferCount = 512;

    // Issue all write requests and flush them via close().
    final BufferFileWriter spillWriter = createWriterAndWriteBuffers(IO_MANAGER, writerBufferPool, bufferCount);
    spillWriter.close();

    final TestSubpartitionConsumer subpartitionConsumer =
            new TestSubpartitionConsumer(false, new TestConsumerCallback.RecyclingCallback());
    final SpilledSubpartitionView spilledView = new SpilledSubpartitionView(
            mock(ResultSubpartition.class), 32 * 1024, spillWriter, bufferCount + 1, subpartitionConsumer);
    subpartitionConsumer.setSubpartitionView(spilledView);

    // No buffer available, don't deadlock. We need to make progress in situations when the view
    // is consumed at an input gate with local and remote channels. The remote channels might
    // eat up all the buffers, at which point the spilled view will not have any buffers
    // available and the input gate can't make any progress if we don't return immediately.
    //
    // The current solution is straight-forward with a separate buffer per spilled subpartition,
    // but introduces memory-overhead.
    //
    // TODO Replace with asynchronous buffer pool request as this introduces extra buffers per
    // consumed subpartition.
    subpartitionConsumer.call();
}
Use of org.apache.flink.runtime.io.disk.iomanager.BufferFileWriter in the Apache Flink project:
the testCompressedView method of the CompressedHeaderlessChannelTest class.
/**
 * Round-trip test: writes a random-length sequence of ints through a
 * compressed channel writer output view and reads it back through the
 * matching reader input view, verifying every value. Repeats with several
 * random sizes (including the possible 0-int edge case).
 *
 * @throws IOException if channel creation or I/O fails
 */
@Test
public void testCompressedView() throws IOException {
    // Hoisted out of the loop: the original allocated a fresh Random on
    // every iteration, which is loop-invariant work.
    final Random random = new Random();
    for (int testTime = 0; testTime < 10; testTime++) {
        int testRounds = random.nextInt(20000);
        FileIOChannel.ID channel = ioManager.createChannel();
        BufferFileWriter writer = this.ioManager.createBufferFileWriter(channel);
        CompressedHeaderlessChannelWriterOutputView outputView = new CompressedHeaderlessChannelWriterOutputView(writer, compressionFactory, BUFFER_SIZE);
        for (int i = 0; i < testRounds; i++) {
            outputView.writeInt(i);
        }
        // close() flushes the remaining compressed block; the block count is
        // only meaningful after the view has been closed.
        outputView.close();
        int blockCount = outputView.getBlockCount();
        CompressedHeaderlessChannelReaderInputView inputView = new CompressedHeaderlessChannelReaderInputView(channel, ioManager, compressionFactory, BUFFER_SIZE, blockCount);
        for (int i = 0; i < testRounds; i++) {
            assertEquals(i, inputView.readInt());
        }
        inputView.close();
    }
}
Use of org.apache.flink.runtime.io.disk.iomanager.BufferFileWriter in the Apache Flink project:
the createWriterAndWriteBuffers helper method of the SpilledSubpartitionViewTest class.
/**
 * Returns a buffer file writer, to which the specified number of buffer write requests have
 * been issued (including an end of partition event).
 *
 * <p> Call {@link BufferFileWriter#close()} to ensure that all buffers have been written.
 *
 * @param ioManager       the I/O manager used to create the channel and the writer
 * @param bufferProvider  source of the data buffers to write
 * @param numberOfBuffers number of data buffers to request and write
 * @return the writer with all write requests issued (not yet flushed); the caller owns it
 * @throws IOException if creating the writer or issuing a write request fails
 */
static BufferFileWriter createWriterAndWriteBuffers(IOManager ioManager, BufferProvider bufferProvider, int numberOfBuffers) throws IOException {
    final BufferFileWriter writer = ioManager.createBufferFileWriter(ioManager.createChannel());
    try {
        for (int i = 0; i < numberOfBuffers; i++) {
            writer.writeBlock(bufferProvider.requestBuffer());
        }
        // Terminate the stream so consumers see a proper end-of-partition marker.
        writer.writeBlock(EventSerializer.toBuffer(EndOfPartitionEvent.INSTANCE));
    } catch (IOException e) {
        // Don't leak the channel's backing file when a write request fails;
        // propagate the original failure, not the cleanup's.
        try {
            writer.closeAndDelete();
        } catch (IOException ignored) {
            // Best-effort cleanup only.
        }
        throw e;
    }
    return writer;
}
Aggregations