Use of org.apache.flink.runtime.io.network.partition.ResultPartitionBuilder in project flink by apache.
The class LocalInputChannelTest, method testConcurrentConsumeMultiplePartitions.
/**
* Tests the consumption of multiple subpartitions via local input channels.
*
* <p>Multiple producer tasks produce pipelined partitions, which are consumed by multiple tasks
* via local input channels.
*/
@Test
public void testConcurrentConsumeMultiplePartitions() throws Exception {
// Config
final int parallelism = 32;
final int producerBufferPoolSize = parallelism + 1;
final int numberOfBuffersPerChannel = 1024;
// Setup
// One thread per produced partition and one per consumer
final ExecutorService executor = Executors.newFixedThreadPool(2 * parallelism);
final NetworkBufferPool networkBuffers = new NetworkBufferPool(
        (parallelism * producerBufferPoolSize) + (parallelism * parallelism),
        TestBufferFactory.BUFFER_SIZE);
final ResultPartitionManager partitionManager = new ResultPartitionManager();
final ResultPartitionID[] partitionIds = new ResultPartitionID[parallelism];
final TestPartitionProducer[] partitionProducers = new TestPartitionProducer[parallelism];
// Create all partitions
for (int i = 0; i < parallelism; i++) {
partitionIds[i] = new ResultPartitionID();
final ResultPartition partition = new ResultPartitionBuilder()
        .setResultPartitionId(partitionIds[i])
        .setNumberOfSubpartitions(parallelism)
        .setNumTargetKeyGroups(parallelism)
        .setResultPartitionManager(partitionManager)
        .setBufferPoolFactory(() -> networkBuffers.createBufferPool(
                producerBufferPoolSize, producerBufferPoolSize, parallelism, Integer.MAX_VALUE))
        .build();
// Create a buffer pool for this partition
partition.setup();
// Create the producer
partitionProducers[i] = new TestPartitionProducer(
        (BufferWritingResultPartition) partition,
        false,
        new TestPartitionProducerBufferSource(
                parallelism, TestBufferFactory.BUFFER_SIZE, numberOfBuffersPerChannel));
}
// Test
try {
// Submit producer tasks
List<CompletableFuture<?>> results = Lists.newArrayListWithCapacity(parallelism + 1);
for (int i = 0; i < parallelism; i++) {
results.add(CompletableFuture.supplyAsync(
        CheckedSupplier.unchecked(partitionProducers[i]::call), executor));
}
// Submit consumer
for (int i = 0; i < parallelism; i++) {
final TestLocalInputChannelConsumer consumer = new TestLocalInputChannelConsumer(
        i,
        parallelism,
        numberOfBuffersPerChannel,
        networkBuffers.createBufferPool(parallelism, parallelism),
        partitionManager,
        new TaskEventDispatcher(),
        partitionIds);
results.add(CompletableFuture.supplyAsync(
        CheckedSupplier.unchecked(consumer::call), executor));
}
FutureUtils.waitForAll(results).get();
} finally {
networkBuffers.destroyAllBufferPools();
networkBuffers.destroy();
executor.shutdown();
}
}
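Stripped of the producer/consumer threading above, the core ResultPartitionBuilder pattern in this test is: build the partition against a shared NetworkBufferPool and ResultPartitionManager, then call setup() to create its local buffer pool. The following is a minimal sketch distilled from the snippet, not code from the Flink repository; the class name, pool sizes, and segment size are illustrative, and it assumes Flink's test utilities (ResultPartitionBuilder) are on the classpath.

import org.apache.flink.runtime.io.network.buffer.NetworkBufferPool;
import org.apache.flink.runtime.io.network.partition.ResultPartition;
import org.apache.flink.runtime.io.network.partition.ResultPartitionBuilder;
import org.apache.flink.runtime.io.network.partition.ResultPartitionID;
import org.apache.flink.runtime.io.network.partition.ResultPartitionManager;

public class LocalPartitionSetupSketch {

    public static void main(String[] args) throws Exception {
        final int subpartitions = 4;
        final int poolSize = subpartitions + 1;
        // Global pool backing all local buffer pools (64 segments of 32 KiB, illustrative values).
        final NetworkBufferPool networkBuffers = new NetworkBufferPool(64, 32 * 1024);
        final ResultPartitionManager partitionManager = new ResultPartitionManager();
        try {
            final ResultPartition partition = new ResultPartitionBuilder()
                    .setResultPartitionId(new ResultPartitionID())
                    .setNumberOfSubpartitions(subpartitions)
                    .setResultPartitionManager(partitionManager)
                    .setBufferPoolFactory(() -> networkBuffers.createBufferPool(
                            poolSize, poolSize, subpartitions, Integer.MAX_VALUE))
                    .build();
            // As in the test, setup() invokes the buffer pool factory, i.e. the
            // "Create a buffer pool for this partition" step.
            partition.setup();
        } finally {
            networkBuffers.destroyAllBufferPools();
            networkBuffers.destroy();
        }
    }
}

A consumer-side LocalInputChannel can then look the partition up through the same ResultPartitionManager, which is what TestLocalInputChannelConsumer does in the test above.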
Use of org.apache.flink.runtime.io.network.partition.ResultPartitionBuilder in project flink by apache.
The class SingleInputGateTest, method testUpdateUnknownInputChannel.
/**
* Tests that the input gate can successfully convert unknown input channels into local and remote
* channels.
*/
@Test
public void testUpdateUnknownInputChannel() throws Exception {
final NettyShuffleEnvironment network = createNettyShuffleEnvironment();
final ResultPartition localResultPartition = new ResultPartitionBuilder()
        .setResultPartitionManager(network.getResultPartitionManager())
        .setupBufferPoolFactoryFromNettyShuffleEnvironment(network)
        .build();
final ResultPartition remoteResultPartition = new ResultPartitionBuilder()
        .setResultPartitionManager(network.getResultPartitionManager())
        .setupBufferPoolFactoryFromNettyShuffleEnvironment(network)
        .build();
localResultPartition.setup();
remoteResultPartition.setup();
final SingleInputGate inputGate = createInputGate(network, 2, ResultPartitionType.PIPELINED);
final InputChannel[] inputChannels = new InputChannel[2];
try (Closer closer = Closer.create()) {
closer.register(network::close);
closer.register(inputGate::close);
final ResultPartitionID localResultPartitionId = localResultPartition.getPartitionId();
inputChannels[0] = buildUnknownInputChannel(network, inputGate, localResultPartitionId, 0);
final ResultPartitionID remoteResultPartitionId = remoteResultPartition.getPartitionId();
inputChannels[1] = buildUnknownInputChannel(network, inputGate, remoteResultPartitionId, 1);
inputGate.setInputChannels(inputChannels);
inputGate.setup();
assertThat(
        inputGate.getInputChannels()
                .get(createSubpartitionInfo(remoteResultPartitionId.getPartitionId())),
        is(instanceOf(UnknownInputChannel.class)));
assertThat(
        inputGate.getInputChannels()
                .get(createSubpartitionInfo(localResultPartitionId.getPartitionId())),
        is(instanceOf(UnknownInputChannel.class)));
ResourceID localLocation = ResourceID.generate();
// Trigger updates to remote input channel from unknown input channel
inputGate.updateInputChannel(
        localLocation,
        createRemoteWithIdAndLocation(
                remoteResultPartitionId.getPartitionId(), ResourceID.generate()));
assertThat(
        inputGate.getInputChannels()
                .get(createSubpartitionInfo(remoteResultPartitionId.getPartitionId())),
        is(instanceOf(RemoteInputChannel.class)));
assertThat(
        inputGate.getInputChannels()
                .get(createSubpartitionInfo(localResultPartitionId.getPartitionId())),
        is(instanceOf(UnknownInputChannel.class)));
// Trigger updates to local input channel from unknown input channel
inputGate.updateInputChannel(
        localLocation,
        createRemoteWithIdAndLocation(localResultPartitionId.getPartitionId(), localLocation));
assertThat(
        inputGate.getInputChannels()
                .get(createSubpartitionInfo(remoteResultPartitionId.getPartitionId())),
        is(instanceOf(RemoteInputChannel.class)));
assertThat(
        inputGate.getInputChannels()
                .get(createSubpartitionInfo(localResultPartitionId.getPartitionId())),
        is(instanceOf(LocalInputChannel.class)));
}
}
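The essential wiring of this test, reduced to the producer side, is sketched below: both partitions register with the environment's ResultPartitionManager and draw their buffer pools from the NettyShuffleEnvironment. This is an illustrative sketch rather than the test itself; it assumes the NettyShuffleEnvironmentBuilder test utility as a stand-in for the test's createNettyShuffleEnvironment() helper, and it omits the input gate and the updateInputChannel calls that perform the actual unknown-to-local/remote conversion.

import org.apache.flink.runtime.io.network.NettyShuffleEnvironment;
import org.apache.flink.runtime.io.network.NettyShuffleEnvironmentBuilder;
import org.apache.flink.runtime.io.network.partition.ResultPartition;
import org.apache.flink.runtime.io.network.partition.ResultPartitionBuilder;

public class UnknownChannelWiringSketch {

    public static void main(String[] args) throws Exception {
        // Assumption: NettyShuffleEnvironmentBuilder (a Flink test utility) provides a default environment.
        final NettyShuffleEnvironment environment = new NettyShuffleEnvironmentBuilder().build();
        try {
            // Both partitions share the environment's partition manager and buffer pools, as in the test.
            final ResultPartition localPartition = new ResultPartitionBuilder()
                    .setResultPartitionManager(environment.getResultPartitionManager())
                    .setupBufferPoolFactoryFromNettyShuffleEnvironment(environment)
                    .build();
            final ResultPartition remotePartition = new ResultPartitionBuilder()
                    .setResultPartitionManager(environment.getResultPartitionManager())
                    .setupBufferPoolFactoryFromNettyShuffleEnvironment(environment)
                    .build();
            localPartition.setup();
            remotePartition.setup();
            // In the test, UnknownInputChannels pointing at these partitions are later replaced via
            // SingleInputGate#updateInputChannel: a descriptor whose producer location equals the
            // consumer's own ResourceID becomes a LocalInputChannel, any other location a RemoteInputChannel.
        } finally {
            environment.close();
        }
    }
}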
Use of org.apache.flink.runtime.io.network.partition.ResultPartitionBuilder in project flink by apache.
The class LocalInputChannelTest, method testAnnounceNewBufferSize.
@Test
public void testAnnounceNewBufferSize() throws IOException, InterruptedException {
// given: Configured LocalInputChannel and pipelined subpartition.
PipelinedResultPartition parent = (PipelinedResultPartition) new ResultPartitionBuilder()
        .setResultPartitionType(ResultPartitionType.PIPELINED)
        .setFileChannelManager(NoOpFileChannelManager.INSTANCE)
        .setNumberOfSubpartitions(2)
        .build();
ResultSubpartition subpartition0 = parent.getAllPartitions()[0];
ResultSubpartition subpartition1 = parent.getAllPartitions()[1];
LocalInputChannel channel0 = createLocalInputChannel(
        new SingleInputGateBuilder().build(),
        new TestingResultPartitionManager(subpartition0.createReadView(() -> {})));
LocalInputChannel channel1 = createLocalInputChannel(
        new SingleInputGateBuilder().build(),
        new TestingResultPartitionManager(subpartition1.createReadView(() -> {})));
channel0.requestSubpartition();
channel1.requestSubpartition();
// and: The preferred buffer size is the default value (Integer.MAX_VALUE).
assertEquals(Integer.MAX_VALUE, subpartition0.add(createFilledFinishedBufferConsumer(16)));
assertEquals(Integer.MAX_VALUE, subpartition1.add(createFilledFinishedBufferConsumer(16)));
// when: Announce different buffer sizes for the two channels via LocalInputChannel.
channel0.announceBufferSize(9);
channel1.announceBufferSize(20);
// then: The corresponding subpartitions report the new sizes.
assertEquals(9, subpartition0.add(createFilledFinishedBufferConsumer(16)));
assertEquals(20, subpartition1.add(createFilledFinishedBufferConsumer(16)));
}
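The construction half of this test, obtaining the two pipelined subpartitions whose add(...) return value later reflects the announced buffer size, can be reproduced with the sketch below. It is illustrative rather than repository code; the import path for the NoOpFileChannelManager test utility is assumed, and the channel-side helpers from the test (createLocalInputChannel, createFilledFinishedBufferConsumer) are left out.

import org.apache.flink.runtime.io.disk.NoOpFileChannelManager; // test utility; import path assumed
import org.apache.flink.runtime.io.network.partition.PipelinedResultPartition;
import org.apache.flink.runtime.io.network.partition.ResultPartitionBuilder;
import org.apache.flink.runtime.io.network.partition.ResultPartitionType;
import org.apache.flink.runtime.io.network.partition.ResultSubpartition;

public class AnnounceBufferSizeSetupSketch {

    public static void main(String[] args) throws Exception {
        // Same construction as the test: a pipelined partition with two in-memory subpartitions.
        final PipelinedResultPartition parent = (PipelinedResultPartition) new ResultPartitionBuilder()
                .setResultPartitionType(ResultPartitionType.PIPELINED)
                .setFileChannelManager(NoOpFileChannelManager.INSTANCE)
                .setNumberOfSubpartitions(2)
                .build();
        final ResultSubpartition subpartition0 = parent.getAllPartitions()[0];
        final ResultSubpartition subpartition1 = parent.getAllPartitions()[1];
        // In the test, each LocalInputChannel calls announceBufferSize(n) after requesting its
        // subpartition; the producer then sees that value as the return of the next
        // subpartition.add(...) call (Integer.MAX_VALUE until an announcement arrives).
        System.out.println("created " + subpartition0 + " and " + subpartition1);
    }
}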
Use of org.apache.flink.runtime.io.network.partition.ResultPartitionBuilder in project flink by apache.
The class ChannelPersistenceITCase, method buildResultPartition.
private BufferWritingResultPartition buildResultPartition(
        NetworkBufferPool networkBufferPool,
        ResultPartitionType resultPartitionType,
        int index,
        int numberOfSubpartitions) throws IOException {
ResultPartition resultPartition = new ResultPartitionBuilder()
        .setResultPartitionIndex(index)
        .setResultPartitionType(resultPartitionType)
        .setNumberOfSubpartitions(numberOfSubpartitions)
        .setBufferPoolFactory(() -> networkBufferPool.createBufferPool(
                numberOfSubpartitions, Integer.MAX_VALUE, numberOfSubpartitions, Integer.MAX_VALUE))
        .build();
resultPartition.setup();
return (BufferWritingResultPartition) resultPartition;
}
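A call site for this helper might look like the following. This is a hypothetical usage snippet inside the same test class (buildResultPartition is the method above, not a Flink API); the pool size, segment size, partition type, and subpartition count are illustrative.

// Hypothetical usage within ChannelPersistenceITCase; values are illustrative.
NetworkBufferPool networkBufferPool = new NetworkBufferPool(16, 32 * 1024);
try {
    BufferWritingResultPartition partition =
            buildResultPartition(networkBufferPool, ResultPartitionType.PIPELINED, 0, 2);
    // setup() has already been applied inside the helper, so the partition is ready to
    // accept buffers for its two subpartitions.
} finally {
    networkBufferPool.destroyAllBufferPools();
    networkBufferPool.destroy();
}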
Use of org.apache.flink.runtime.io.network.partition.ResultPartitionBuilder in project flink by apache.
The class StreamNetworkBenchmarkEnvironment, method createResultPartitionWriter.
public ResultPartitionWriter createResultPartitionWriter(int partitionIndex) throws Exception {
ResultPartitionWriter resultPartitionWriter = new ResultPartitionBuilder()
        .setResultPartitionId(partitionIds[partitionIndex])
        .setResultPartitionType(ResultPartitionType.PIPELINED_BOUNDED)
        .setNumberOfSubpartitions(channels)
        .setResultPartitionManager(senderEnv.getResultPartitionManager())
        .setupBufferPoolFactoryFromNettyShuffleEnvironment(senderEnv)
        .build();
resultPartitionWriter.setup();
return resultPartitionWriter;
}
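In the benchmark environment, this factory method is typically invoked once per sender partition, for example in a loop like the one below. This is a hypothetical call pattern within StreamNetworkBenchmarkEnvironment (partitionIds is the field used above); it is not taken from the Flink repository.

// Hypothetical driver loop inside StreamNetworkBenchmarkEnvironment: one writer per sender partition.
ResultPartitionWriter[] writers = new ResultPartitionWriter[partitionIds.length];
for (int i = 0; i < partitionIds.length; i++) {
    writers[i] = createResultPartitionWriter(i);
}
// Each writer is backed by a PIPELINED_BOUNDED partition registered with senderEnv's
// ResultPartitionManager, so receiver-side input gates can request its subpartitions locally or over Netty.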