Use of org.apache.flink.runtime.io.network.util.TestPartitionProducer in project flink by apache.
The example below is taken from the class LocalInputChannelTest, method testConcurrentConsumeMultiplePartitions.
/**
* Tests the consumption of multiple subpartitions via local input channels.
*
* <p> Multiple producer tasks produce pipelined partitions, which are consumed by multiple
* tasks via local input channels.
*/
@Test
public void testConcurrentConsumeMultiplePartitions() throws Exception {
    // Config
    final int parallelism = 32;
    final int producerBufferPoolSize = parallelism + 1;
    final int numberOfBuffersPerChannel = 1024;

    checkArgument(parallelism >= 1);
    checkArgument(producerBufferPoolSize >= parallelism);
    checkArgument(numberOfBuffersPerChannel >= 1);

    // Setup
    // One thread per produced partition and one per consumer
    final ExecutorService executor = Executors.newFixedThreadPool(2 * parallelism);
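    // The global buffer pool has to cover every local pool created below: each of the
    // 'parallelism' producers gets 'producerBufferPoolSize' buffers, and each of the
    // 'parallelism' consumers gets a pool of 'parallelism' buffers.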
    final NetworkBufferPool networkBuffers = new NetworkBufferPool(
        (parallelism * producerBufferPoolSize) + (parallelism * parallelism),
        TestBufferFactory.BUFFER_SIZE,
        MemoryType.HEAP);

    final ResultPartitionConsumableNotifier partitionConsumableNotifier =
        mock(ResultPartitionConsumableNotifier.class);
    final TaskActions taskActions = mock(TaskActions.class);
    final IOManager ioManager = mock(IOManager.class);
    final JobID jobId = new JobID();

    final ResultPartitionManager partitionManager = new ResultPartitionManager();

    final ResultPartitionID[] partitionIds = new ResultPartitionID[parallelism];
    final TestPartitionProducer[] partitionProducers = new TestPartitionProducer[parallelism];

    // Create all partitions
    for (int i = 0; i < parallelism; i++) {
        partitionIds[i] = new ResultPartitionID();

        final ResultPartition partition = new ResultPartition(
            "Test Name",
            taskActions,
            jobId,
            partitionIds[i],
            ResultPartitionType.PIPELINED,
            parallelism,
            parallelism,
            partitionManager,
            partitionConsumableNotifier,
            ioManager,
            true);

        // Create a buffer pool for this partition
        partition.registerBufferPool(
            networkBuffers.createBufferPool(producerBufferPoolSize, producerBufferPoolSize));

        // Create the producer
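        // The producer is submitted to the executor below and writes the buffers
        // provided by the source into the partition.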
        partitionProducers[i] = new TestPartitionProducer(
            partition,
            false,
            new TestPartitionProducerBufferSource(
                parallelism,
                partition.getBufferProvider(),
                numberOfBuffersPerChannel));

        // Register with the partition manager in order to allow the local input channels to
        // request their respective partitions.
        partitionManager.registerResultPartition(partition);
    }

    // Test
    try {
        // Submit producer tasks
        List<Future<?>> results = Lists.newArrayListWithCapacity(parallelism + 1);

        for (int i = 0; i < parallelism; i++) {
            results.add(executor.submit(partitionProducers[i]));
        }

        // Submit consumers
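        // Each consumer reads one subpartition of every produced partition via local
        // input channels, expecting 'numberOfBuffersPerChannel' buffers per channel.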
        for (int i = 0; i < parallelism; i++) {
            results.add(executor.submit(new TestLocalInputChannelConsumer(
                i,
                parallelism,
                numberOfBuffersPerChannel,
                networkBuffers.createBufferPool(parallelism, parallelism),
                partitionManager,
                new TaskEventDispatcher(),
                partitionIds)));
        }

        // Wait for all to finish
        for (Future<?> result : results) {
            result.get();
        }
    } finally {
        networkBuffers.destroy();
        executor.shutdown();
    }
}
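For quick reference, the producer-side pattern used above can be condensed to the sketch below: create a pipelined ResultPartition, register a buffer pool for it, register the partition with the ResultPartitionManager, wrap it together with a TestPartitionProducerBufferSource in a TestPartitionProducer, and submit the producer to an executor. This is a minimal sketch, not part of the original test: it reuses the same classes and Mockito mocks as the listing above, and the single-partition setup, task name, pool sizes, and buffer count are illustrative assumptions.

// Minimal sketch: one pipelined partition with a single subpartition (illustrative sizes only)
final ResultPartitionManager partitionManager = new ResultPartitionManager();
final NetworkBufferPool networkBuffers =
    new NetworkBufferPool(16, TestBufferFactory.BUFFER_SIZE, MemoryType.HEAP);

final ResultPartition partition = new ResultPartition(
    "Sketch", mock(TaskActions.class), new JobID(), new ResultPartitionID(),
    ResultPartitionType.PIPELINED, 1, 1, partitionManager,
    mock(ResultPartitionConsumableNotifier.class), mock(IOManager.class), true);

partition.registerBufferPool(networkBuffers.createBufferPool(2, 2));
partitionManager.registerResultPartition(partition);

// Producer backed by a source that provides 8 buffers for the single channel
final TestPartitionProducer producer = new TestPartitionProducer(
    partition, false,
    new TestPartitionProducerBufferSource(1, partition.getBufferProvider(), 8));

final ExecutorService executor = Executors.newSingleThreadExecutor();
try {
    executor.submit(producer).get(); // wait for the producer to finish
} finally {
    networkBuffers.destroy();
    executor.shutdown();
}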