Use of org.apache.flink.runtime.checkpoint.channel.ChannelStateWriterImpl in project flink by apache.
From the class SubtaskCheckpointCoordinatorImpl, the openChannelStateWriter method:
private static ChannelStateWriter openChannelStateWriter(
        String taskName, CheckpointStorageWorkerView checkpointStorage, Environment env) {
    ChannelStateWriterImpl writer =
            new ChannelStateWriterImpl(
                    taskName, env.getTaskInfo().getIndexOfThisSubtask(), checkpointStorage);
    // Open the writer before returning it so it is ready to accept checkpoint requests.
    writer.open();
    return writer;
}
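The method above only opens the writer; the per-checkpoint calls are driven later. As a rough, illustrative sketch of that sequence (writer, checkpointId, checkpointOptions, inputChannelInfo, inputBuffers, subpartitionInfo and outputBuffer are placeholder names, not taken from this class), the calls mirror the ITCase shown in the next example:

    // Illustrative only: the per-checkpoint sequence driven on an opened ChannelStateWriter.
    writer.start(checkpointId, checkpointOptions);   // begin collecting channel state for this checkpoint
    writer.addInputData(
            checkpointId, inputChannelInfo, ChannelStateWriter.SEQUENCE_NUMBER_UNKNOWN, inputBuffers);
    writer.finishInput(checkpointId);                // no more input buffers for this checkpoint
    writer.addOutputData(
            checkpointId, subpartitionInfo, ChannelStateWriter.SEQUENCE_NUMBER_UNKNOWN, outputBuffer);
    writer.finishOutput(checkpointId);               // no more output buffers for this checkpoint
    ChannelStateWriteResult result = writer.getAndRemoveWriteResult(checkpointId);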
Use of org.apache.flink.runtime.checkpoint.channel.ChannelStateWriterImpl in project flink by apache.
From the test class ChannelPersistenceITCase, the write method:
private ChannelStateWriteResult write(
        long checkpointId,
        Map<InputChannelInfo, byte[]> icMap,
        Map<ResultSubpartitionInfo, byte[]> rsMap)
        throws Exception {
    int maxStateSize = sizeOfBytes(icMap) + sizeOfBytes(rsMap) + Long.BYTES * 2;
    Map<InputChannelInfo, Buffer> icBuffers = wrapWithBuffers(icMap);
    Map<ResultSubpartitionInfo, Buffer> rsBuffers = wrapWithBuffers(rsMap);
    try (ChannelStateWriterImpl writer =
            new ChannelStateWriterImpl("test", 0, getStreamFactoryFactory(maxStateSize))) {
        writer.open();
        writer.start(
                checkpointId,
                new CheckpointOptions(
                        CHECKPOINT, new CheckpointStorageLocationReference("poly".getBytes())));
        // Feed the input channel buffers and mark the input side as finished.
        for (Map.Entry<InputChannelInfo, Buffer> e : icBuffers.entrySet()) {
            writer.addInputData(
                    checkpointId,
                    e.getKey(),
                    SEQUENCE_NUMBER_UNKNOWN,
                    ofElements(Buffer::recycleBuffer, e.getValue()));
        }
        writer.finishInput(checkpointId);
        // Feed the result subpartition buffers and mark the output side as finished.
        for (Map.Entry<ResultSubpartitionInfo, Buffer> e : rsBuffers.entrySet()) {
            writer.addOutputData(checkpointId, e.getKey(), SEQUENCE_NUMBER_UNKNOWN, e.getValue());
        }
        writer.finishOutput(checkpointId);
        ChannelStateWriteResult result = writer.getAndRemoveWriteResult(checkpointId);
        // Wait for the subpartition handles so that closing the writer does not complete the result abnormally.
        result.getResultSubpartitionStateHandles().join();
        return result;
    }
}
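For orientation, here is a minimal, hypothetical invocation of the write helper above; the channel coordinates and payload bytes are invented for illustration, and the sketch assumes the usual static import of java.util.Collections.singletonMap in the surrounding test class:

    long checkpointId = 1L;
    Map<InputChannelInfo, byte[]> icMap =
            singletonMap(new InputChannelInfo(0, 0), new byte[] {1, 2, 3});
    Map<ResultSubpartitionInfo, byte[]> rsMap =
            singletonMap(new ResultSubpartitionInfo(0, 0), new byte[] {4, 5, 6});
    ChannelStateWriteResult result = write(checkpointId, icMap, rsMap);
    // The handles are exposed as futures; get() blocks until the writer has finished persisting them.
    Collection<InputChannelStateHandle> inputHandles = result.getInputChannelStateHandles().get();
    Collection<ResultSubpartitionStateHandle> outputHandles = result.getResultSubpartitionStateHandles().get();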