Example usage of org.apache.flink.runtime.io.network.util.TestSubpartitionConsumer in the Apache Flink project:
class SpilledSubpartitionViewTest, method testWriteConsume.
@Test
public void testWriteConsume() throws Exception {
    // Number of data buffers to spill to disk before reading them back.
    final int bufferCount = 512;

    // Spill the buffers and finish the write phase before any reads start.
    final BufferFileWriter spillWriter = createWriterAndWriteBuffers(IO_MANAGER, writerBufferPool, bufferCount);
    spillWriter.close();

    final TestPooledBufferProvider readBufferPool = new TestPooledBufferProvider(1);
    final TestSubpartitionConsumer consumer = new TestSubpartitionConsumer(false, new TestConsumerCallback.RecyclingCallback());

    // The view serves every spilled data buffer plus one end-of-partition event (+1).
    final SpilledSubpartitionView view = new SpilledSubpartitionView(mock(ResultSubpartition.class), readBufferPool.getMemorySegmentSize(), spillWriter, bufferCount + 1, consumer);
    consumer.setSubpartitionView(view);

    // Consume the whole subpartition on the calling thread.
    consumer.call();
}
Example usage of org.apache.flink.runtime.io.network.util.TestSubpartitionConsumer in the Apache Flink project:
class SpilledSubpartitionViewTest, method testConsumeWithFewBuffers.
@Test
public void testConsumeWithFewBuffers() throws Exception {
    // Number of data buffers spilled to disk before consumption.
    final int spilledBuffers = 512;

    // Write out the buffers and close the writer before consuming.
    final BufferFileWriter spillWriter = createWriterAndWriteBuffers(IO_MANAGER, writerBufferPool, spilledBuffers);
    spillWriter.close();

    final TestSubpartitionConsumer consumer = new TestSubpartitionConsumer(false, new TestConsumerCallback.RecyclingCallback());

    // 32 KiB memory segments; expected buffer count includes the end-of-partition event (+1).
    final SpilledSubpartitionView view = new SpilledSubpartitionView(mock(ResultSubpartition.class), 32 * 1024, spillWriter, spilledBuffers + 1, consumer);
    consumer.setSubpartitionView(view);

    // Even with no buffer available the consumer must not deadlock. We need to make
    // progress in situations when the view is consumed at an input gate with local and
    // remote channels. The remote channels might eat up all the buffers, at which point
    // the spilled view will not have any buffers available and the input gate can't make
    // any progress if we don't return immediately.
    //
    // The current solution is straight-forward with a separate buffer per spilled
    // subpartition, but introduces memory-overhead.
    //
    // TODO Replace with asynchronous buffer pool request as this introduces extra
    // buffers per consumed subpartition.
    consumer.call();
}
Example usage of org.apache.flink.runtime.io.network.util.TestSubpartitionConsumer in the Apache Flink project:
class PipelinedSubpartitionTest, method testProduceConsume.
/**
 * Produces a sequence of buffers filled with consecutive integers into a pipelined
 * subpartition on one thread and consumes them on another, verifying that the consumer
 * sees exactly the integers the producer wrote, in order.
 *
 * @param isSlowProducer whether the producer should artificially delay between buffers
 * @param isSlowConsumer whether the consumer should artificially delay between buffers
 * @throws Exception if production or consumption fails, or if either side does not
 *                   finish within the timeout (which indicates a deadlock in the
 *                   tested subpartition)
 */
private void testProduceConsume(boolean isSlowProducer, boolean isSlowConsumer) throws Exception {
    // Config
    final int producerBufferPoolSize = 8;
    final int producerNumberOfBuffersToProduce = 128;
    // Producer behaviour: each produced buffer is filled with consecutive ints,
    // continuing the sequence across buffers (buffer k starts at k * ints-per-segment).
    final TestProducerSource producerSource = new TestProducerSource() {

        private BufferProvider bufferProvider = new TestPooledBufferProvider(producerBufferPoolSize);

        private int numberOfBuffers;

        @Override
        public BufferOrEvent getNextBufferOrEvent() throws Exception {
            // Signal end of production once the configured count is reached.
            if (numberOfBuffers == producerNumberOfBuffersToProduce) {
                return null;
            }
            final Buffer buffer = bufferProvider.requestBufferBlocking();
            final MemorySegment segment = buffer.getMemorySegment();
            // First int of this buffer continues the global sequence (4 bytes per int).
            int next = numberOfBuffers * (segment.size() / 4);
            for (int i = 0; i < segment.size(); i += 4) {
                segment.putInt(i, next);
                next++;
            }
            numberOfBuffers++;
            return new BufferOrEvent(buffer, 0);
        }
    };
    // Consumer behaviour: verify each received buffer carries the expected slice of
    // the integer sequence, mirroring the producer's fill pattern.
    final TestConsumerCallback consumerCallback = new TestConsumerCallback() {

        private int numberOfBuffers;

        @Override
        public void onBuffer(Buffer buffer) {
            final MemorySegment segment = buffer.getMemorySegment();
            int expected = numberOfBuffers * (segment.size() / 4);
            for (int i = 0; i < segment.size(); i += 4) {
                assertEquals(expected, segment.getInt(i));
                expected++;
            }
            numberOfBuffers++;
            // Recycle so the pooled buffer can be reused by the producer.
            buffer.recycle();
        }

        @Override
        public void onEvent(AbstractEvent event) {
            // Nothing to do in this test
        }
    };
    final PipelinedSubpartition subpartition = createSubpartition();
    TestSubpartitionConsumer consumer = new TestSubpartitionConsumer(isSlowConsumer, consumerCallback);
    final PipelinedSubpartitionView view = subpartition.createReadView(null, consumer);
    consumer.setSubpartitionView(view);
    Future<Boolean> producerResult = executorService.submit(new TestSubpartitionProducer(subpartition, isSlowProducer, producerSource));
    Future<Boolean> consumerResult = executorService.submit(consumer);
    // Wait for producer and consumer to finish. Bound the wait so a deadlock in the
    // tested subpartition fails the test instead of hanging the build forever
    // (consistent with the 2-minute timeout used by the spilled-view tests).
    producerResult.get(2, TimeUnit.MINUTES);
    consumerResult.get(2, TimeUnit.MINUTES);
}
Example usage of org.apache.flink.runtime.io.network.util.TestSubpartitionConsumer in the Apache Flink project:
class SpilledSubpartitionViewTest, method testReadMultipleFilesWithSingleBufferPool.
/**
 * Spills two files of 512 buffers each, then consumes both through spilled views that
 * share a single two-segment buffer pool, verifying that consumption completes without
 * deadlock even under buffer contention. Each consumer runs on its own thread and is
 * given two minutes to finish before the test fails.
 *
 * @throws Exception if writing, reading, or cleanup fails, or on timeout (indicating a
 *                   bug/deadlock in the tested subpartition view)
 */
@Test
public void testReadMultipleFilesWithSingleBufferPool() throws Exception {
    ExecutorService executor = null;
    BufferFileWriter[] writers = null;
    ResultSubpartitionView[] readers = null;
    try {
        executor = Executors.newCachedThreadPool();
        // Setup
        writers = new BufferFileWriter[] { createWriterAndWriteBuffers(IO_MANAGER, writerBufferPool, 512), createWriterAndWriteBuffers(IO_MANAGER, writerBufferPool, 512) };
        readers = new ResultSubpartitionView[writers.length];
        TestSubpartitionConsumer[] consumers = new TestSubpartitionConsumer[writers.length];
        // Deliberately tiny shared pool: both views must make progress with only
        // two read buffers between them.
        BufferProvider inputBuffers = new TestPooledBufferProvider(2);
        ResultSubpartition parent = mock(ResultSubpartition.class);
        // Wait for writers to finish
        for (BufferFileWriter writer : writers) {
            writer.close();
        }
        // Create the views depending on the test configuration
        for (int i = 0; i < readers.length; i++) {
            consumers[i] = new TestSubpartitionConsumer(false, new TestConsumerCallback.RecyclingCallback());
            readers[i] = new SpilledSubpartitionView(parent, inputBuffers.getMemorySegmentSize(), writers[i], // +1 for end of partition event
            512 + 1, consumers[i]);
            consumers[i].setSubpartitionView(readers[i]);
        }
        final List<Future<Boolean>> results = Lists.newArrayList();
        // Submit the consuming tasks
        for (TestSubpartitionConsumer consumer : consumers) {
            results.add(executor.submit(consumer));
        }
        // Wait for the results
        for (Future<Boolean> res : results) {
            try {
                res.get(2, TimeUnit.MINUTES);
            } catch (TimeoutException e) {
                // Rethrow with an explanatory message, but keep the original exception
                // as the cause so its stack trace is not lost.
                TimeoutException timeout = new TimeoutException("There has been a timeout in the test. This " + "indicates that there is a bug/deadlock in the tested subpartition " + "view.");
                timeout.initCause(e);
                throw timeout;
            }
        }
    } finally {
        // Best-effort cleanup: delete spill files, release view resources, and stop
        // the executor regardless of how the test body exited.
        if (writers != null) {
            for (BufferFileWriter writer : writers) {
                if (writer != null) {
                    writer.deleteChannel();
                }
            }
        }
        if (readers != null) {
            for (ResultSubpartitionView reader : readers) {
                if (reader != null) {
                    reader.releaseAllResources();
                }
            }
        }
        if (executor != null) {
            executor.shutdown();
        }
    }
}
Aggregations