Use of org.apache.flink.runtime.io.network.partition.ResultPartition in project flink by apache.
From the class RecordWriterTest, the helper method createResultPartition:
public static ResultPartition createResultPartition(int bufferSize, int numSubpartitions) throws IOException {
    NettyShuffleEnvironment env = new NettyShuffleEnvironmentBuilder().setBufferSize(bufferSize).build();
    ResultPartition partition = createPartition(env, ResultPartitionType.PIPELINED, numSubpartitions);
    // setup() registers the partition and assigns its local buffer pool
    partition.setup();
    return partition;
}
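Tests built on this helper are responsible for releasing the partition afterwards. A minimal usage sketch (hypothetical test body, assuming the helper above is in scope):

// Sketch: typical lifecycle around createResultPartition
ResultPartition partition = createResultPartition(32, 4);
try {
    // ... emit records or events against the partition ...
} finally {
    // release() frees the buffer pool and all queued subpartition data
    partition.release();
}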
Use of org.apache.flink.runtime.io.network.partition.ResultPartition in project flink by apache.
From the class RecordWriterTest, the test method testBroadcastEmitRecord:
/**
 * Tests that records are broadcast via {@link RecordWriter#broadcastEmit(IOReadableWritable)}.
 */
@Test
public void testBroadcastEmitRecord() throws Exception {
    final int numberOfChannels = 4;
    final int bufferSize = 32;
    final int numValues = 8;
    final int serializationLength = 4;
    final ResultPartition partition = createResultPartition(bufferSize, numberOfChannels);
    final RecordWriter<SerializationTestType> writer = createRecordWriter(partition);
    final RecordDeserializer<SerializationTestType> deserializer = new SpillingAdaptiveSpanningRecordDeserializer<>(new String[] { tempFolder.getRoot().getAbsolutePath() });
    final ArrayDeque<SerializationTestType> serializedRecords = new ArrayDeque<>();
    final Iterable<SerializationTestType> records = Util.randomRecords(numValues, SerializationTestTypeFactory.INT);
    for (SerializationTestType record : records) {
        serializedRecords.add(record);
        writer.broadcastEmit(record);
    }
    // each record is written as a 4-byte length prefix followed by its serialized payload
    final int numRequiredBuffers = numValues / (bufferSize / (4 + serializationLength));
    if (isBroadcastWriter) {
        // a broadcast writer serializes once and shares the same buffers across all subpartitions
        assertEquals(numRequiredBuffers, partition.getBufferPool().bestEffortGetNumOfUsedBuffers());
    } else {
        assertEquals(numRequiredBuffers * numberOfChannels, partition.getBufferPool().bestEffortGetNumOfUsedBuffers());
    }
    for (int i = 0; i < numberOfChannels; i++) {
        assertEquals(numRequiredBuffers, partition.getNumberOfQueuedBuffers(i));
        ResultSubpartitionView view = partition.createSubpartitionView(i, new NoOpBufferAvailablityListener());
        verifyDeserializationResults(view, deserializer, serializedRecords.clone(), numRequiredBuffers, numValues);
    }
}
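With the constants above, the numRequiredBuffers expression works out as follows (a worked example of the buffer arithmetic):

// recordSize         = 4 (length prefix) + 4 (serializationLength) = 8 bytes
// recordsPerBuffer   = bufferSize / recordSize = 32 / 8            = 4 records
// numRequiredBuffers = numValues / recordsPerBuffer = 8 / 4        = 2 buffers per subpartition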
Use of org.apache.flink.runtime.io.network.partition.ResultPartition in project flink by apache.
From the class RecordWriterTest, the test method testBroadcastEventNoRecords:
/**
 * Tests broadcasting events when no records have been emitted yet.
 */
@Test
public void testBroadcastEventNoRecords() throws Exception {
    int numberOfChannels = 4;
    int bufferSize = 32;
    ResultPartition partition = createResultPartition(bufferSize, numberOfChannels);
    RecordWriter<ByteArrayIO> writer = createRecordWriter(partition);
    CheckpointBarrier barrier = new CheckpointBarrier(Integer.MAX_VALUE + 919192L, Integer.MAX_VALUE + 18828228L, CheckpointOptions.forCheckpointWithDefaultLocation());
    // No records emitted yet, broadcast should not request a buffer
    writer.broadcastEvent(barrier);
    // the serialized event is wrapped directly, so nothing is taken from the buffer pool
    assertEquals(0, partition.getBufferPool().bestEffortGetNumOfUsedBuffers());
    for (int i = 0; i < numberOfChannels; i++) {
        // every subpartition still queues exactly one buffer, carrying the event
        assertEquals(1, partition.getNumberOfQueuedBuffers(i));
        ResultSubpartitionView view = partition.createSubpartitionView(i, new NoOpBufferAvailablityListener());
        BufferOrEvent boe = parseBuffer(view.getNextBuffer().buffer(), i);
        assertTrue(boe.isEvent());
        assertEquals(barrier, boe.getEvent());
        assertFalse(view.getAvailabilityAndBacklog(Integer.MAX_VALUE).isAvailable());
    }
}
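parseBuffer is a helper of the test class. Independent of it, a serialized event buffer can be recognized and decoded with Flink's EventSerializer, roughly like this (a sketch, not the helper's actual implementation):

// Sketch: decoding an event buffer directly instead of via parseBuffer
Buffer buffer = view.getNextBuffer().buffer();
if (!buffer.isBuffer()) { // isBuffer() returns false for serialized events
    AbstractEvent event = EventSerializer.fromBuffer(buffer, getClass().getClassLoader());
    // here, event would be the broadcast CheckpointBarrier
}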
Use of org.apache.flink.runtime.io.network.partition.ResultPartition in project flink by apache.
From the class PartitionRequestQueueTest, the helper method testCancelPartitionRequest:
private void testCancelPartitionRequest(boolean isAvailableView) throws Exception {
    // setup
    final ResultPartitionManager partitionManager = new ResultPartitionManager();
    final ResultPartition partition = createFinishedPartitionWithFilledData(partitionManager);
    final InputChannelID receiverId = new InputChannelID();
    final PartitionRequestQueue queue = new PartitionRequestQueue();
    final CreditBasedSequenceNumberingViewReader reader = new CreditBasedSequenceNumberingViewReader(receiverId, 2, queue);
    final EmbeddedChannel channel = new EmbeddedChannel(queue);
    reader.requestSubpartitionView(partitionManager, partition.getPartitionId(), 0);
    // add this reader into allReaders queue
    queue.notifyReaderCreated(reader);
    // block the channel so that we see an intermediate state in the test
    blockChannel(channel);
    // add credit to make this reader available for adding into availableReaders queue
    if (isAvailableView) {
        queue.addCreditOrResumeConsumption(receiverId, viewReader -> viewReader.addCredit(1));
        assertTrue(queue.getAvailableReaders().contains(reader));
    }
    // cancel this subpartition view
    queue.cancel(receiverId);
    channel.runPendingTasks();
    assertFalse(queue.getAvailableReaders().contains(reader));
    // the reader view should be released (the partition is not, though, blocking partitions
    // support multiple successive readers for recovery and caching)
    assertTrue(reader.isReleased());
    // cleanup
    partition.release();
    channel.close();
}
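EmbeddedChannel is Netty's in-memory channel for exercising handlers without real network I/O, which is why these tests can drive PartitionRequestQueue synchronously. A minimal sketch of the interaction pattern (someMessage is a placeholder, not part of this test):

// Sketch: the generic EmbeddedChannel test pattern
EmbeddedChannel ch = new EmbeddedChannel(new PartitionRequestQueue());
ch.writeInbound(someMessage); // run a message through the inbound handlers
ch.runPendingTasks(); // execute tasks scheduled on the channel's event loop
Object written = ch.readOutbound(); // take what the handlers wrote towards the wire
ch.close();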
Use of org.apache.flink.runtime.io.network.partition.ResultPartition in project flink by apache.
From the class PartitionRequestServerHandlerTest, the test method testAcknowledgeAllRecordsProcessed:
@Test
public void testAcknowledgeAllRecordsProcessed() throws IOException {
    InputChannelID inputChannelID = new InputChannelID();
    ResultPartition resultPartition = PartitionTestUtils.createPartition(ResultPartitionType.PIPELINED_BOUNDED);
    ResultPartitionProvider partitionProvider = (partitionId, index, availabilityListener) -> resultPartition.createSubpartitionView(index, availabilityListener);
    // Creates the netty network handler stack.
    PartitionRequestQueue partitionRequestQueue = new PartitionRequestQueue();
    final PartitionRequestServerHandler serverHandler = new PartitionRequestServerHandler(new ResultPartitionManager(), new TaskEventDispatcher(), partitionRequestQueue);
    final EmbeddedChannel channel = new EmbeddedChannel(serverHandler, partitionRequestQueue);
    // Creates and registers the view to netty.
    NetworkSequenceViewReader viewReader = new CreditBasedSequenceNumberingViewReader(inputChannelID, 2, partitionRequestQueue);
    viewReader.requestSubpartitionView(partitionProvider, resultPartition.getPartitionId(), 0);
    partitionRequestQueue.notifyReaderCreated(viewReader);
    // Write the message to acknowledge all records are processed to server
    resultPartition.notifyEndOfData(StopMode.DRAIN);
    CompletableFuture<Void> allRecordsProcessedFuture = resultPartition.getAllDataProcessedFuture();
    assertFalse(allRecordsProcessedFuture.isDone());
    channel.writeInbound(new NettyMessage.AckAllUserRecordsProcessed(inputChannelID));
    channel.runPendingTasks();
    assertTrue(allRecordsProcessedFuture.isDone());
    assertFalse(allRecordsProcessedFuture.isCompletedExceptionally());
}
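The same handshake is what lets a producer drain gracefully outside the test: it signals end of data and then waits until every consumer has acknowledged processing. A minimal producer-side sketch, assuming a set-up ResultPartition:

// Sketch: producer-side drain handshake
resultPartition.notifyEndOfData(StopMode.DRAIN); // signal that no more user records follow
resultPartition.getAllDataProcessedFuture().join(); // completes once all consumers have acked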