Use of org.apache.flink.runtime.checkpoint.channel.InputChannelInfo in the Apache Flink project.
From the class ChannelStatePersisterTest, method testLateBarrierTriggeringCheckpoint.
@Test(expected = CheckpointException.class)
public void testLateBarrierTriggeringCheckpoint() throws Exception {
    // A barrier for checkpoint 2 has already been observed; attempting to start
    // persistence for the older checkpoint 1 must fail with a CheckpointException.
    final ChannelStatePersister persister =
            new ChannelStatePersister(ChannelStateWriter.NO_OP, new InputChannelInfo(0, 0));
    final long staleCheckpointId = 1L;
    final long newerCheckpointId = 2L;
    persister.checkForBarrier(barrier(newerCheckpointId));
    persister.startPersisting(staleCheckpointId, Collections.emptyList());
}
Use of org.apache.flink.runtime.checkpoint.channel.InputChannelInfo in the Apache Flink project.
From the class ChannelStatePersisterTest, method testNewBarrierNotOverwrittenByStopPersisting.
@Test
public void testNewBarrierNotOverwrittenByStopPersisting() throws Exception {
    // Scenario: checkpoint 1 starts and one buffer is persisted. The coordinator then
    // times out checkpoint 1 and starts checkpoint 2; the task thread sees the new
    // barrier before it aborts checkpoint 1. Aborting (stopPersisting) must not erase
    // the fact that the newer barrier was already received.
    final RecordingChannelStateWriter stateWriter = new RecordingChannelStateWriter();
    final InputChannelInfo channel = new InputChannelInfo(0, 0);
    final ChannelStatePersister persister = new ChannelStatePersister(stateWriter, channel);

    final long firstCheckpointId = 1L;
    stateWriter.start(
            firstCheckpointId, CheckpointOptions.unaligned(CheckpointType.CHECKPOINT, getDefault()));
    persister.checkForBarrier(barrier(firstCheckpointId));
    persister.startPersisting(firstCheckpointId, Arrays.asList(buildSomeBuffer()));
    assertEquals(1, stateWriter.getAddedInput().get(channel).size());

    // Buffers arriving after the barrier are not part of checkpoint 1's state.
    persister.maybePersist(buildSomeBuffer());
    assertEquals(1, stateWriter.getAddedInput().get(channel).size());

    // meanwhile, checkpoint coordinator timed out the 1st checkpoint and started the 2nd
    // now task thread is picking up the barrier and aborts the 1st:
    persister.checkForBarrier(barrier(firstCheckpointId + 1));
    persister.maybePersist(buildSomeBuffer());
    persister.stopPersisting(firstCheckpointId);
    persister.maybePersist(buildSomeBuffer());
    assertEquals(1, stateWriter.getAddedInput().get(channel).size());
    assertTrue(persister.hasBarrierReceived());
}
Use of org.apache.flink.runtime.checkpoint.channel.InputChannelInfo in the Apache Flink project.
From the class ChannelState, method prioritizeAllAnnouncements.
public void prioritizeAllAnnouncements() throws IOException {
    // Convert every pending announcement into a priority event on its gate/channel,
    // then drop all recorded announcements.
    for (Map.Entry<InputChannelInfo, Integer> entry : sequenceNumberInAnnouncedChannels.entrySet()) {
        final InputChannelInfo channel = entry.getKey();
        final Integer announcedSequenceNumber = entry.getValue();
        inputs[channel.getGateIdx()]
                .convertToPriorityEvent(channel.getInputChannelIdx(), announcedSequenceNumber);
    }
    sequenceNumberInAnnouncedChannels.clear();
}
Use of org.apache.flink.runtime.checkpoint.channel.InputChannelInfo in the Apache Flink project.
From the class ChannelPersistenceITCase, method testReadWritten.
@Test
public void testReadWritten() throws Exception {
    // Round trip: write random bytes as input-channel and result-subpartition state,
    // then read both back through a SequentialChannelStateReader and compare.
    final byte[] writtenInputData = randomBytes(1024);
    final byte[] writtenOutputData = randomBytes(1024);
    final int partitionIndex = 0;

    final SequentialChannelStateReader reader =
            new SequentialChannelStateReaderImpl(
                    toTaskStateSnapshot(
                            write(
                                    1L,
                                    singletonMap(new InputChannelInfo(0, 0), writtenInputData),
                                    singletonMap(
                                            new ResultSubpartitionInfo(partitionIndex, 0),
                                            writtenOutputData))));

    final NetworkBufferPool bufferPool = new NetworkBufferPool(4, 1024);
    try {
        final int numChannels = 1;

        // Restore and verify the input side.
        final InputGate gate = buildGate(bufferPool, numChannels);
        reader.readInputData(new InputGate[] {gate});
        assertArrayEquals(
                writtenInputData, collectBytes(gate::pollNext, BufferOrEvent::getBuffer));

        // Restore and verify the output side.
        final BufferWritingResultPartition partition =
                buildResultPartition(
                        bufferPool, ResultPartitionType.PIPELINED, partitionIndex, numChannels);
        reader.readOutputData(new BufferWritingResultPartition[] {partition}, false);
        final ResultSubpartitionView view =
                partition.createSubpartitionView(0, new NoOpBufferAvailablityListener());
        assertArrayEquals(
                writtenOutputData,
                collectBytes(
                        () -> Optional.ofNullable(view.getNextBuffer()), BufferAndBacklog::buffer));
    } finally {
        bufferPool.destroy();
    }
}
Use of org.apache.flink.runtime.checkpoint.channel.InputChannelInfo in the Apache Flink project.
From the class ChannelPersistenceITCase, method write.
/**
 * Writes the given input-channel and result-subpartition payloads as channel state for
 * {@code checkpointId} and returns the collected write result.
 */
private ChannelStateWriteResult write(
        long checkpointId,
        Map<InputChannelInfo, byte[]> icMap,
        Map<ResultSubpartitionInfo, byte[]> rsMap)
        throws Exception {
    final int maxStateSize = sizeOfBytes(icMap) + sizeOfBytes(rsMap) + Long.BYTES * 2;
    final Map<InputChannelInfo, Buffer> inputBuffers = wrapWithBuffers(icMap);
    final Map<ResultSubpartitionInfo, Buffer> outputBuffers = wrapWithBuffers(rsMap);
    try (ChannelStateWriterImpl writer =
            new ChannelStateWriterImpl("test", 0, getStreamFactoryFactory(maxStateSize))) {
        writer.open();
        writer.start(
                checkpointId,
                new CheckpointOptions(
                        CHECKPOINT, new CheckpointStorageLocationReference("poly".getBytes())));
        for (Map.Entry<InputChannelInfo, Buffer> entry : inputBuffers.entrySet()) {
            writer.addInputData(
                    checkpointId,
                    entry.getKey(),
                    SEQUENCE_NUMBER_UNKNOWN,
                    ofElements(Buffer::recycleBuffer, entry.getValue()));
        }
        writer.finishInput(checkpointId);
        for (Map.Entry<ResultSubpartitionInfo, Buffer> entry : outputBuffers.entrySet()) {
            writer.addOutputData(
                    checkpointId, entry.getKey(), SEQUENCE_NUMBER_UNKNOWN, entry.getValue());
        }
        writer.finishOutput(checkpointId);
        final ChannelStateWriteResult result = writer.getAndRemoveWriteResult(checkpointId);
        // prevent abnormal complete in close
        result.getResultSubpartitionStateHandles().join();
        return result;
    }
}
Aggregations