Use of org.apache.flink.runtime.io.network.partition.BufferWritingResultPartition in project Flink by Apache.
The class SequentialChannelStateReaderImplTest, method collectBuffers.
private Map<ResultSubpartitionInfo, List<Buffer>> collectBuffers(
        BufferWritingResultPartition[] resultPartitions) throws IOException {
    Map<ResultSubpartitionInfo, List<Buffer>> actual = new HashMap<>();
    for (BufferWritingResultPartition resultPartition : resultPartitions) {
        for (int i = 0; i < resultPartition.getNumberOfSubpartitions(); i++) {
            ResultSubpartitionInfo info =
                    resultPartition.getAllPartitions()[i].getSubpartitionInfo();
            ResultSubpartitionView view =
                    resultPartition.createSubpartitionView(
                            info.getSubPartitionIdx(), new NoOpBufferAvailablityListener());
            for (BufferAndBacklog buffer = view.getNextBuffer();
                    buffer != null;
                    buffer = view.getNextBuffer()) {
                if (buffer.buffer().isBuffer()) {
                    actual.computeIfAbsent(info, unused -> new ArrayList<>())
                            .add(buffer.buffer());
                }
            }
        }
    }
    return actual;
}
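The inner loop above drains each subpartition view by polling getNextBuffer() until it returns null, keeping only data buffers (Buffer#isBuffer filters out events such as EndOfPartitionEvent). A minimal standalone sketch of that drain pattern, using a hypothetical drainDataBuffers helper that is not part of the Flink test:

// Hypothetical helper mirroring the inner loop of collectBuffers above.
private static List<Buffer> drainDataBuffers(ResultSubpartitionView view) throws IOException {
    List<Buffer> buffers = new ArrayList<>();
    for (BufferAndBacklog next = view.getNextBuffer();
            next != null;
            next = view.getNextBuffer()) {
        if (next.buffer().isBuffer()) { // keep data buffers, skip events
            buffers.add(next.buffer());
        }
    }
    return buffers;
}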
Use of org.apache.flink.runtime.io.network.partition.BufferWritingResultPartition in project Flink by Apache.
The class SequentialChannelStateReaderImplTest, method withResultPartitions.
private void withResultPartitions(
        ThrowingConsumer<BufferWritingResultPartition[], Exception> action) throws Exception {
    int segmentsToAllocate = parLevel * parLevel * buffersPerChannel;
    NetworkBufferPool networkBufferPool = new NetworkBufferPool(segmentsToAllocate, bufferSize);
    BufferWritingResultPartition[] resultPartitions =
            range(0, parLevel)
                    .mapToObj(
                            i ->
                                    new ResultPartitionBuilder()
                                            .setResultPartitionIndex(i)
                                            .setNumberOfSubpartitions(parLevel)
                                            .setNetworkBufferPool(networkBufferPool)
                                            .build())
                    .toArray(BufferWritingResultPartition[]::new);
    try {
        for (ResultPartition resultPartition : resultPartitions) {
            resultPartition.setup();
        }
        action.accept(resultPartitions);
    } finally {
        for (ResultPartition resultPartition : resultPartitions) {
            resultPartition.close();
        }
        try {
            assertEquals(
                    segmentsToAllocate,
                    networkBufferPool.getNumberOfAvailableMemorySegments());
        } finally {
            networkBufferPool.destroyAllBufferPools();
            networkBufferPool.destroy();
        }
    }
}
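The helper hands fully set-up partitions to the test body and guarantees teardown plus a leak check: after closing, every memory segment must be back in the pool. A hedged usage sketch, assuming the same test fields (parLevel, bufferSize) and the ResultPartition#finish/emitRecord API shown elsewhere in this listing:

// Hypothetical call site of withResultPartitions (not from the Flink source):
withResultPartitions(resultPartitions -> {
    for (BufferWritingResultPartition partition : resultPartitions) {
        // write one record to subpartition 0 of each partition
        partition.emitRecord(ByteBuffer.allocate(bufferSize), 0);
        partition.finish();
    }
    // e.g. assert on collectBuffers(resultPartitions) here
});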
Use of org.apache.flink.runtime.io.network.partition.BufferWritingResultPartition in project Flink by Apache.
The class SingleInputGateTest, method testQueuedBuffers.
@Test
public void testQueuedBuffers() throws Exception {
    final NettyShuffleEnvironment network = createNettyShuffleEnvironment();
    final BufferWritingResultPartition resultPartition =
            (BufferWritingResultPartition)
                    new ResultPartitionBuilder()
                            .setResultPartitionManager(network.getResultPartitionManager())
                            .setupBufferPoolFactoryFromNettyShuffleEnvironment(network)
                            .build();
    final SingleInputGate inputGate = createInputGate(network, 2, ResultPartitionType.PIPELINED);
    final ResultPartitionID localResultPartitionId = resultPartition.getPartitionId();
    final InputChannel[] inputChannels = new InputChannel[2];
    final RemoteInputChannel remoteInputChannel =
            InputChannelBuilder.newBuilder()
                    .setChannelIndex(1)
                    .setupFromNettyShuffleEnvironment(network)
                    .setConnectionManager(new TestingConnectionManager())
                    .buildRemoteChannel(inputGate);
    inputChannels[0] = remoteInputChannel;
    inputChannels[1] =
            InputChannelBuilder.newBuilder()
                    .setChannelIndex(0)
                    .setPartitionId(localResultPartitionId)
                    .setupFromNettyShuffleEnvironment(network)
                    .setConnectionManager(new TestingConnectionManager())
                    .buildLocalChannel(inputGate);
    try (Closer closer = Closer.create()) {
        closer.register(network::close);
        closer.register(inputGate::close);
        closer.register(resultPartition::release);
        resultPartition.setup();
        setupInputGate(inputGate, inputChannels);
        remoteInputChannel.onBuffer(createBuffer(1), 0, 0);
        assertEquals(1, inputGate.getNumberOfQueuedBuffers());
        resultPartition.emitRecord(ByteBuffer.allocate(1), 0);
        assertEquals(2, inputGate.getNumberOfQueuedBuffers());
    }
}
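Note the Closer idiom: registered Closeables are closed in reverse registration order, so resultPartition.release() runs first, then inputGate.close(), then network.close(). A minimal sketch of that semantics, independent of the test (Closeable is a functional interface, so lambdas work):

// Closer closes registered resources in reverse registration order.
try (Closer closer = Closer.create()) {
    closer.register(() -> System.out.println("closed second"));
    closer.register(() -> System.out.println("closed first"));
} // prints "closed first", then "closed second"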
Use of org.apache.flink.runtime.io.network.partition.BufferWritingResultPartition in project Flink by Apache.
The class ChannelPersistenceITCase, method upstreamBlocksAfterRecoveringState.
private void upstreamBlocksAfterRecoveringState(ResultPartitionType type) throws Exception {
    NetworkBufferPool networkBufferPool = new NetworkBufferPool(4, 1024);
    byte[] dataAfterRecovery = randomBytes(1024);
    try {
        BufferWritingResultPartition resultPartition =
                buildResultPartition(networkBufferPool, type, 0, 1);
        new SequentialChannelStateReaderImpl(new TaskStateSnapshot())
                .readOutputData(new BufferWritingResultPartition[] {resultPartition}, true);
        resultPartition.emitRecord(ByteBuffer.wrap(dataAfterRecovery), 0);
        ResultSubpartitionView view =
                resultPartition.createSubpartitionView(0, new NoOpBufferAvailablityListener());
        if (type != ResultPartitionType.PIPELINED_APPROXIMATE) {
            assertEquals(RECOVERY_COMPLETION, view.getNextBuffer().buffer().getDataType());
            assertNull(view.getNextBuffer());
            view.resumeConsumption();
        }
        assertArrayEquals(dataAfterRecovery, collectBytes(view.getNextBuffer().buffer()));
    } finally {
        networkBufferPool.destroy();
    }
}
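The collectBytes helper is defined elsewhere in ChannelPersistenceITCase and not shown in this listing. A plausible hedged sketch of what such a helper looks like, copying a buffer's readable bytes into an array via Buffer#getNioBufferReadable (the real implementation may differ):

// Hypothetical sketch of collectBytes; the real helper lives elsewhere in the test class.
private static byte[] collectBytes(Buffer buffer) {
    ByteBuffer nio = buffer.getNioBufferReadable();
    byte[] bytes = new byte[nio.remaining()];
    nio.get(bytes);
    return bytes;
}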
Use of org.apache.flink.runtime.io.network.partition.BufferWritingResultPartition in project Flink by Apache.
The class LocalInputChannelTest, method testConcurrentConsumeMultiplePartitions.
/**
* Tests the consumption of multiple subpartitions via local input channels.
*
* <p>Multiple producer tasks produce pipelined partitions, which are consumed by multiple tasks
* via local input channels.
*/
@Test
public void testConcurrentConsumeMultiplePartitions() throws Exception {
    // Config
    final int parallelism = 32;
    final int producerBufferPoolSize = parallelism + 1;
    final int numberOfBuffersPerChannel = 1024;
    // Setup
    // One thread per produced partition and one per consumer
    final ExecutorService executor = Executors.newFixedThreadPool(2 * parallelism);
    final NetworkBufferPool networkBuffers =
            new NetworkBufferPool(
                    (parallelism * producerBufferPoolSize) + (parallelism * parallelism),
                    TestBufferFactory.BUFFER_SIZE);
    final ResultPartitionManager partitionManager = new ResultPartitionManager();
    final ResultPartitionID[] partitionIds = new ResultPartitionID[parallelism];
    final TestPartitionProducer[] partitionProducers = new TestPartitionProducer[parallelism];
    // Create all partitions
    for (int i = 0; i < parallelism; i++) {
        partitionIds[i] = new ResultPartitionID();
        final ResultPartition partition =
                new ResultPartitionBuilder()
                        .setResultPartitionId(partitionIds[i])
                        .setNumberOfSubpartitions(parallelism)
                        .setNumTargetKeyGroups(parallelism)
                        .setResultPartitionManager(partitionManager)
                        .setBufferPoolFactory(
                                () ->
                                        networkBuffers.createBufferPool(
                                                producerBufferPoolSize,
                                                producerBufferPoolSize,
                                                parallelism,
                                                Integer.MAX_VALUE))
                        .build();
        // Create a buffer pool for this partition
        partition.setup();
        // Create the producer
        partitionProducers[i] =
                new TestPartitionProducer(
                        (BufferWritingResultPartition) partition,
                        false,
                        new TestPartitionProducerBufferSource(
                                parallelism,
                                TestBufferFactory.BUFFER_SIZE,
                                numberOfBuffersPerChannel));
    }
    // Test
    try {
        // Submit producer tasks
        List<CompletableFuture<?>> results = Lists.newArrayListWithCapacity(parallelism + 1);
        for (int i = 0; i < parallelism; i++) {
            results.add(
                    CompletableFuture.supplyAsync(
                            CheckedSupplier.unchecked(partitionProducers[i]::call), executor));
        }
        // Submit consumers
        for (int i = 0; i < parallelism; i++) {
            final TestLocalInputChannelConsumer consumer =
                    new TestLocalInputChannelConsumer(
                            i,
                            parallelism,
                            numberOfBuffersPerChannel,
                            networkBuffers.createBufferPool(parallelism, parallelism),
                            partitionManager,
                            new TaskEventDispatcher(),
                            partitionIds);
            results.add(
                    CompletableFuture.supplyAsync(
                            CheckedSupplier.unchecked(consumer::call), executor));
        }
        FutureUtils.waitForAll(results).get();
    } finally {
        networkBuffers.destroyAllBufferPools();
        networkBuffers.destroy();
        executor.shutdown();
    }
}
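The global pool sizing follows directly from the config: each of the 32 producers gets a fixed pool of parallelism + 1 = 33 segments, and each of the 32 consumers a fixed pool of parallelism = 32 segments. A worked sketch of that arithmetic:

// Buffer-pool sizing used above, spelled out:
int parallelism = 32;
int producerBufferPoolSize = parallelism + 1;                 // 33 segments per producer pool
int producerSegments = parallelism * producerBufferPoolSize;  // 32 * 33 = 1056
int consumerSegments = parallelism * parallelism;             // 32 * 32 = 1024
int totalSegments = producerSegments + consumerSegments;      // 2080 segments in the global pool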