Use of org.apache.flink.runtime.io.network.buffer.BufferConsumer in the Apache Flink project.
From the class BoundedBlockingSubpartitionTest, method testRecycleCurrentBufferOnFailure.
/**
 * Verifies that the in-flight buffer is NOT recycled when creating a read view fails,
 * and that a subsequent {@code release()} recycles all remaining buffers.
 */
@Test
public void testRecycleCurrentBufferOnFailure() throws Exception {
    final ResultPartition partition =
            createPartition(ResultPartitionType.BLOCKING, fileChannelManager);
    final boolean sslRequired = !sslEnabled && type == BoundedBlockingSubpartitionType.FILE;
    final BoundedBlockingSubpartition testedSubpartition =
            new BoundedBlockingSubpartition(0, partition, new FailingBoundedData(), sslRequired);
    final BufferConsumer bufferConsumer =
            BufferBuilderTestUtils.createFilledFinishedBufferConsumer(100);

    try {
        boolean exceptionThrown = false;
        try {
            testedSubpartition.add(bufferConsumer);
            testedSubpartition.createReadView(new NoOpBufferAvailablityListener());
        } catch (Exception expected) {
            // FailingBoundedData is expected to make this path throw.
            exceptionThrown = true;
        }
        assertTrue("should fail with an exception", exceptionThrown);

        // The failure must not recycle the data that is still being written.
        assertFalse(bufferConsumer.isRecycled());
        assertNotNull(testedSubpartition.getCurrentBuffer());
        assertFalse(testedSubpartition.getCurrentBuffer().isRecycled());
    } finally {
        // Releasing the subpartition recycles everything that is still held.
        testedSubpartition.release();
        assertTrue(bufferConsumer.isRecycled());
        assertNull(testedSubpartition.getCurrentBuffer());
    }
}
Use of org.apache.flink.runtime.io.network.buffer.BufferConsumer in the Apache Flink project.
From the class InputChannelTestUtils, method createResultSubpartitionView.
/**
 * Creates a read view over the first subpartition of a fresh pipelined result partition,
 * pre-filled with the given buffers.
 *
 * @param buffers buffer consumers to enqueue before the view is created
 * @return a read view on the filled subpartition
 * @throws IOException if setting up the partition or creating the view fails
 */
public static ResultSubpartitionView createResultSubpartitionView(BufferConsumer... buffers)
        throws IOException {
    final int bufferSize = 4096;
    final PipelinedResultPartition partition =
            (PipelinedResultPartition)
                    PartitionTestUtils.createPartition(
                            ResultPartitionType.PIPELINED,
                            NoOpFileChannelManager.INSTANCE,
                            true,
                            bufferSize);
    final ResultSubpartition subpartition = partition.getAllPartitions()[0];
    for (BufferConsumer bufferConsumer : buffers) {
        subpartition.add(bufferConsumer);
    }
    // No-op availability listener: callers poll the returned view directly.
    return subpartition.createReadView(() -> {
    });
}
Use of org.apache.flink.runtime.io.network.buffer.BufferConsumer in the Apache Flink project.
From the class InputGateFairnessTest, method testFairConsumptionLocalChannels.
// Verifies fair consumption across local input channels: after every poll from the
// input gate, the queued-buffer counts of all source subpartitions may differ by at most one.
@Test
public void testFairConsumptionLocalChannels() throws Exception {
final int numberOfChannels = 37;
final int buffersPerChannel = 27;
// One pipelined result partition per channel.
PipelinedResultPartition[] resultPartitions = IntStream.range(0, numberOfChannels).mapToObj(i -> (PipelinedResultPartition) new ResultPartitionBuilder().build()).toArray(PipelinedResultPartition[]::new);
// try-with-resources: the template buffer consumer is closed when the test body ends.
try (BufferConsumer bufferConsumer = createFilledFinishedBufferConsumer(42)) {
// ----- create some source channels and fill them with one buffer each -----
final PipelinedSubpartition[] sources = Arrays.stream(resultPartitions).map(resultPartition -> resultPartition.getAllPartitions()[0]).toArray(PipelinedSubpartition[]::new);
// ----- create reading side -----
final SingleInputGate gate = createFairnessVerifyingInputGate(numberOfChannels);
// One local input channel per result partition, all wired into the same gate.
final InputChannel[] inputChannels = IntStream.range(0, numberOfChannels).mapToObj(i -> InputChannelBuilder.newBuilder().setChannelIndex(i).setPartitionManager(resultPartitions[i].partitionManager).setPartitionId(resultPartitions[i].getPartitionId()).buildLocalChannel(gate)).toArray(InputChannel[]::new);
// Partitions must be set up before data is added and the gate is started.
for (ResultPartition rp : resultPartitions) {
rp.setup();
}
// seed one initial buffer
sources[12].add(bufferConsumer.copy());
setupInputGate(gate, inputChannels);
// read all the buffers and the EOF event
for (int i = 0; i < numberOfChannels * buffersPerChannel; i++) {
assertNotNull(gate.getNext());
// Fairness invariant: after each poll, the min and max queue lengths across all
// sources differ by at most one.
int min = Integer.MAX_VALUE;
int max = 0;
for (PipelinedSubpartition source : sources) {
int size = source.getNumberOfQueuedBuffers();
min = Math.min(min, size);
max = Math.max(max, size);
}
assertTrue(max == min || max == min + 1);
// Periodically refill the sources so the gate always has competing channels.
if (i % (2 * numberOfChannels) == 0) {
// add three buffers to each channel, in random order
fillRandom(sources, 3, bufferConsumer);
}
}
// there is still more in the queues
}
}
Aggregations