Use of org.apache.flink.runtime.io.network.partition.NoOpBufferAvailablityListener in project flink by apache.
The class ChannelPersistenceITCase, method testReadWritten:
@Test
public void testReadWritten() throws Exception {
    byte[] inputChannelInfoData = randomBytes(1024);
    byte[] resultSubpartitionInfoData = randomBytes(1024);
    int partitionIndex = 0;
    SequentialChannelStateReader reader =
            new SequentialChannelStateReaderImpl(
                    toTaskStateSnapshot(
                            write(
                                    1L,
                                    singletonMap(new InputChannelInfo(0, 0), inputChannelInfoData),
                                    singletonMap(
                                            new ResultSubpartitionInfo(partitionIndex, 0),
                                            resultSubpartitionInfoData))));
    NetworkBufferPool networkBufferPool = new NetworkBufferPool(4, 1024);
    try {
        int numChannels = 1;
        InputGate gate = buildGate(networkBufferPool, numChannels);
        reader.readInputData(new InputGate[] { gate });
        assertArrayEquals(
                inputChannelInfoData,
                collectBytes(gate::pollNext, BufferOrEvent::getBuffer));
        BufferWritingResultPartition resultPartition =
                buildResultPartition(
                        networkBufferPool, ResultPartitionType.PIPELINED, partitionIndex, numChannels);
        reader.readOutputData(new BufferWritingResultPartition[] { resultPartition }, false);
        ResultSubpartitionView view =
                resultPartition.createSubpartitionView(0, new NoOpBufferAvailablityListener());
        assertArrayEquals(
                resultSubpartitionInfoData,
                collectBytes(() -> Optional.ofNullable(view.getNextBuffer()), BufferAndBacklog::buffer));
    } finally {
        networkBufferPool.destroy();
    }
}
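Because the NoOpBufferAvailablityListener passed to createSubpartitionView ignores availability notifications, the test drains the view by polling it directly (the collectBytes helper above). The snippet below is only a minimal sketch of that polling pattern, assuming Flink's ResultSubpartitionView/Buffer APIs plus plain java.io/java.nio types; the name drainSubpartition and the byte-collection details are illustrative, not Flink's actual collectBytes helper.

// Sketch only: poll a ResultSubpartitionView until it is empty, concatenating the payload of
// every data buffer. With a no-op listener there are no callbacks, so polling drives consumption.
private static byte[] drainSubpartition(ResultSubpartitionView view) throws IOException {
    ByteArrayOutputStream out = new ByteArrayOutputStream();
    ResultSubpartition.BufferAndBacklog next;
    while ((next = view.getNextBuffer()) != null) {
        Buffer buffer = next.buffer();
        try {
            if (buffer.isBuffer()) { // copy data buffers, skip events
                ByteBuffer nio = buffer.getNioBufferReadable();
                byte[] chunk = new byte[nio.remaining()];
                nio.get(chunk);
                out.write(chunk, 0, chunk.length);
            }
        } finally {
            buffer.recycleBuffer(); // always return the memory segment to its pool
        }
    }
    return out.toByteArray();
}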
Use of org.apache.flink.runtime.io.network.partition.NoOpBufferAvailablityListener in project flink by apache.
The class RecordWriterTest, method testBroadcastEmitRecord:
/**
 * Tests that records are broadcast via {@link RecordWriter#broadcastEmit(IOReadableWritable)}.
 */
@Test
public void testBroadcastEmitRecord() throws Exception {
    final int numberOfChannels = 4;
    final int bufferSize = 32;
    final int numValues = 8;
    final int serializationLength = 4;
    final ResultPartition partition = createResultPartition(bufferSize, numberOfChannels);
    final RecordWriter<SerializationTestType> writer = createRecordWriter(partition);
    final RecordDeserializer<SerializationTestType> deserializer =
            new SpillingAdaptiveSpanningRecordDeserializer<>(
                    new String[] { tempFolder.getRoot().getAbsolutePath() });
    final ArrayDeque<SerializationTestType> serializedRecords = new ArrayDeque<>();
    final Iterable<SerializationTestType> records =
            Util.randomRecords(numValues, SerializationTestTypeFactory.INT);
    for (SerializationTestType record : records) {
        serializedRecords.add(record);
        writer.broadcastEmit(record);
    }
    final int numRequiredBuffers = numValues / (bufferSize / (4 + serializationLength));
    if (isBroadcastWriter) {
        assertEquals(numRequiredBuffers, partition.getBufferPool().bestEffortGetNumOfUsedBuffers());
    } else {
        assertEquals(
                numRequiredBuffers * numberOfChannels,
                partition.getBufferPool().bestEffortGetNumOfUsedBuffers());
    }
    for (int i = 0; i < numberOfChannels; i++) {
        assertEquals(numRequiredBuffers, partition.getNumberOfQueuedBuffers(i));
        ResultSubpartitionView view =
                partition.createSubpartitionView(i, new NoOpBufferAvailablityListener());
        verifyDeserializationResults(
                view, deserializer, serializedRecords.clone(), numRequiredBuffers, numValues);
    }
}
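To make the buffer arithmetic concrete (reading the hard-coded 4 in the formula as the length field the record serializer writes before each value, which is an assumption about the test's intent): each INT record takes 4 + 4 = 8 bytes, a 32-byte buffer therefore holds 4 records, and the 8 emitted values need numRequiredBuffers = 8 / (32 / 8) = 2 buffers per channel. The broadcast writer shares one serialized copy across all channels (2 buffers in use), while the non-broadcast writer keeps a copy per channel (2 * 4 = 8 buffers in use), which is exactly what the two assertEquals branches check.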
Use of org.apache.flink.runtime.io.network.partition.NoOpBufferAvailablityListener in project flink by apache.
The class RecordWriterTest, method testBroadcastEventNoRecords:
// ---------------------------------------------------------------------------------------------
// Resource release tests
// ---------------------------------------------------------------------------------------------

/**
 * Tests broadcasting events when no records have been emitted yet.
 */
@Test
public void testBroadcastEventNoRecords() throws Exception {
    int numberOfChannels = 4;
    int bufferSize = 32;
    ResultPartition partition = createResultPartition(bufferSize, numberOfChannels);
    RecordWriter<ByteArrayIO> writer = createRecordWriter(partition);
    CheckpointBarrier barrier =
            new CheckpointBarrier(
                    Integer.MAX_VALUE + 919192L,
                    Integer.MAX_VALUE + 18828228L,
                    CheckpointOptions.forCheckpointWithDefaultLocation());
    // No records emitted yet, broadcast should not request a buffer
    writer.broadcastEvent(barrier);
    assertEquals(0, partition.getBufferPool().bestEffortGetNumOfUsedBuffers());
    for (int i = 0; i < numberOfChannels; i++) {
        assertEquals(1, partition.getNumberOfQueuedBuffers(i));
        ResultSubpartitionView view =
                partition.createSubpartitionView(i, new NoOpBufferAvailablityListener());
        BufferOrEvent boe = parseBuffer(view.getNextBuffer().buffer(), i);
        assertTrue(boe.isEvent());
        assertEquals(barrier, boe.getEvent());
        assertFalse(view.getAvailabilityAndBacklog(Integer.MAX_VALUE).isAvailable());
    }
}
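For reference, the no-op listener used throughout these snippets simply swallows the producer's notifications, so the test itself decides when to pull from the view. A hand-rolled equivalent would look roughly like the sketch below; the class name IgnoringAvailabilityListener is hypothetical, and it assumes the single notifyDataAvailable() callback on BufferAvailabilityListener (the real interface may carry further default methods).

// Sketch of a do-nothing availability listener: notifications are ignored, and the caller
// polls the ResultSubpartitionView synchronously instead of reacting to callbacks.
class IgnoringAvailabilityListener implements BufferAvailabilityListener {
    @Override
    public void notifyDataAvailable() {
        // intentionally empty
    }
}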
Use of org.apache.flink.runtime.io.network.partition.NoOpBufferAvailablityListener in project flink by apache.
The class PartitionRequestQueueTest, method testEnqueueReaderByResumingConsumption:
/**
 * Tests {@link PartitionRequestQueue#enqueueAvailableReader(NetworkSequenceViewReader)},
 * verifying the reader would be enqueued in the pipeline after resuming data consumption if
 * there are credit and data available.
 */
@Test
public void testEnqueueReaderByResumingConsumption() throws Exception {
    PipelinedSubpartition subpartition = PipelinedSubpartitionTest.createPipelinedSubpartition();
    Buffer.DataType dataType1 = Buffer.DataType.ALIGNED_CHECKPOINT_BARRIER;
    Buffer.DataType dataType2 = Buffer.DataType.DATA_BUFFER;
    subpartition.add(createEventBufferConsumer(4096, dataType1));
    subpartition.add(createEventBufferConsumer(4096, dataType2));
    BufferAvailabilityListener bufferAvailabilityListener = new NoOpBufferAvailablityListener();
    PipelinedSubpartitionView view = subpartition.createReadView(bufferAvailabilityListener);
    ResultPartitionProvider partitionProvider = (partitionId, index, availabilityListener) -> view;
    InputChannelID receiverId = new InputChannelID();
    PartitionRequestQueue queue = new PartitionRequestQueue();
    CreditBasedSequenceNumberingViewReader reader =
            new CreditBasedSequenceNumberingViewReader(receiverId, 2, queue);
    EmbeddedChannel channel = new EmbeddedChannel(queue);
    reader.requestSubpartitionView(partitionProvider, new ResultPartitionID(), 0);
    queue.notifyReaderCreated(reader);
    assertTrue(reader.getAvailabilityAndBacklog().isAvailable());
    reader.notifyDataAvailable();
    channel.runPendingTasks();
    assertFalse(reader.getAvailabilityAndBacklog().isAvailable());
    assertEquals(1, subpartition.unsynchronizedGetNumberOfQueuedBuffers());
    queue.addCreditOrResumeConsumption(receiverId, NetworkSequenceViewReader::resumeConsumption);
    assertFalse(reader.getAvailabilityAndBacklog().isAvailable());
    assertEquals(0, subpartition.unsynchronizedGetNumberOfQueuedBuffers());
    Object data1 = channel.readOutbound();
    assertEquals(dataType1, ((NettyMessage.BufferResponse) data1).buffer.getDataType());
    Object data2 = channel.readOutbound();
    assertEquals(dataType2, ((NettyMessage.BufferResponse) data2).buffer.getDataType());
}
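Reading the assertions together: the first notifyDataAvailable() only lets the aligned checkpoint barrier through, which is why one buffer is still queued afterwards; only once addCreditOrResumeConsumption(...) resumes the reader does the remaining data buffer leave the subpartition, and both buffers then surface on the embedded channel as BufferResponse messages in the order they were added.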
Use of org.apache.flink.runtime.io.network.partition.NoOpBufferAvailablityListener in project flink by apache.
The class PartitionRequestQueueTest, method testAnnounceBacklog:
@Test
public void testAnnounceBacklog() throws Exception {
    PipelinedSubpartition subpartition = PipelinedSubpartitionTest.createPipelinedSubpartition();
    subpartition.add(createEventBufferConsumer(4096, Buffer.DataType.DATA_BUFFER));
    subpartition.add(createEventBufferConsumer(4096, Buffer.DataType.DATA_BUFFER));
    PipelinedSubpartitionView view =
            subpartition.createReadView(new NoOpBufferAvailablityListener());
    ResultPartitionProvider partitionProvider = (partitionId, index, availabilityListener) -> view;
    PartitionRequestQueue queue = new PartitionRequestQueue();
    InputChannelID receiverId = new InputChannelID();
    CreditBasedSequenceNumberingViewReader reader =
            new CreditBasedSequenceNumberingViewReader(receiverId, 0, queue);
    EmbeddedChannel channel = new EmbeddedChannel(queue);
    reader.requestSubpartitionView(partitionProvider, new ResultPartitionID(), 0);
    queue.notifyReaderCreated(reader);
    reader.notifyDataAvailable();
    channel.runPendingTasks();
    Object data = channel.readOutbound();
    assertTrue(data instanceof NettyMessage.BacklogAnnouncement);
    NettyMessage.BacklogAnnouncement announcement = (NettyMessage.BacklogAnnouncement) data;
    assertEquals(receiverId, announcement.receiverId);
    assertEquals(subpartition.getBuffersInBacklogUnsafe(), announcement.backlog);
    subpartition.release();
    reader.notifyDataAvailable();
    channel.runPendingTasks();
    assertNotNull(channel.readOutbound());
}
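Note that after subpartition.release() the test only checks that some further response still reaches the channel (assertNotNull); the concrete message type produced for a released subpartition is not asserted in this snippet.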