Use of org.apache.flink.util.function.SupplierWithException in project flink by apache.
The class HeapSnapshotStrategy, method asyncSnapshot.
@Override
public SnapshotResultSupplier<KeyedStateHandle> asyncSnapshot(
        HeapSnapshotResources<K> syncPartResource,
        long checkpointId,
        long timestamp,
        @Nonnull CheckpointStreamFactory streamFactory,
        @Nonnull CheckpointOptions checkpointOptions) {
    List<StateMetaInfoSnapshot> metaInfoSnapshots = syncPartResource.getMetaInfoSnapshots();
    if (metaInfoSnapshots.isEmpty()) {
        return snapshotCloseableRegistry -> SnapshotResult.empty();
    }
    final KeyedBackendSerializationProxy<K> serializationProxy =
            new KeyedBackendSerializationProxy<>(
                    // get a serialized form already at state registration time in the future
                    syncPartResource.getKeySerializer(),
                    metaInfoSnapshots,
                    !Objects.equals(
                            UncompressedStreamCompressionDecorator.INSTANCE,
                            keyGroupCompressionDecorator));
    final SupplierWithException<CheckpointStreamWithResultProvider, Exception>
            checkpointStreamSupplier =
                    localRecoveryConfig.isLocalRecoveryEnabled()
                                    && !checkpointOptions.getCheckpointType().isSavepoint()
                            ? () -> createDuplicatingStream(
                                    checkpointId,
                                    CheckpointedStateScope.EXCLUSIVE,
                                    streamFactory,
                                    localRecoveryConfig
                                            .getLocalStateDirectoryProvider()
                                            .orElseThrow(LocalRecoveryConfig.localRecoveryNotEnabled()))
                            : () -> createSimpleStream(CheckpointedStateScope.EXCLUSIVE, streamFactory);
    return (snapshotCloseableRegistry) -> {
        final Map<StateUID, Integer> stateNamesToId = syncPartResource.getStateNamesToId();
        final Map<StateUID, StateSnapshot> cowStateStableSnapshots =
                syncPartResource.getCowStateStableSnapshots();
        final CheckpointStreamWithResultProvider streamWithResultProvider =
                checkpointStreamSupplier.get();
        snapshotCloseableRegistry.registerCloseable(streamWithResultProvider);
        final CheckpointStateOutputStream localStream =
                streamWithResultProvider.getCheckpointOutputStream();
        final DataOutputViewStreamWrapper outView = new DataOutputViewStreamWrapper(localStream);
        serializationProxy.write(outView);
        final long[] keyGroupRangeOffsets = new long[keyGroupRange.getNumberOfKeyGroups()];
        for (int keyGroupPos = 0; keyGroupPos < keyGroupRange.getNumberOfKeyGroups(); ++keyGroupPos) {
            int keyGroupId = keyGroupRange.getKeyGroupId(keyGroupPos);
            keyGroupRangeOffsets[keyGroupPos] = localStream.getPos();
            outView.writeInt(keyGroupId);
            for (Map.Entry<StateUID, StateSnapshot> stateSnapshot :
                    cowStateStableSnapshots.entrySet()) {
                StateSnapshot.StateKeyGroupWriter partitionedSnapshot =
                        stateSnapshot.getValue().getKeyGroupWriter();
                try (OutputStream kgCompressionOut =
                        keyGroupCompressionDecorator.decorateWithCompression(localStream)) {
                    DataOutputViewStreamWrapper kgCompressionView =
                            new DataOutputViewStreamWrapper(kgCompressionOut);
                    kgCompressionView.writeShort(stateNamesToId.get(stateSnapshot.getKey()));
                    partitionedSnapshot.writeStateInKeyGroup(kgCompressionView, keyGroupId);
                } // this will just close the outer compression stream
            }
        }
        if (snapshotCloseableRegistry.unregisterCloseable(streamWithResultProvider)) {
            KeyGroupRangeOffsets kgOffs =
                    new KeyGroupRangeOffsets(keyGroupRange, keyGroupRangeOffsets);
            SnapshotResult<StreamStateHandle> result =
                    streamWithResultProvider.closeAndFinalizeCheckpointStreamResult();
            return toKeyedStateHandleSnapshotResult(result, kgOffs, KeyGroupsStateHandle::new);
        } else {
            throw new IOException("Stream already unregistered.");
        }
    };
}
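The notable point above is that stream creation is wrapped in a SupplierWithException and only invoked inside the returned async lambda, so no checkpoint stream is opened unless the asynchronous snapshot part actually runs. A minimal standalone sketch of that deferred-creation pattern (the file path and class name are placeholders, not taken from the snippet above):

import org.apache.flink.util.function.SupplierWithException;

import java.io.IOException;
import java.io.OutputStream;
import java.nio.file.Files;
import java.nio.file.Paths;

public class DeferredStreamSketch {
    public static void main(String[] args) throws IOException {
        // Nothing is opened here; the supplier only captures how to create the stream.
        // The path is a placeholder chosen for this sketch.
        SupplierWithException<OutputStream, IOException> streamSupplier =
                () -> Files.newOutputStream(Paths.get("/tmp/snapshot-part"));

        // The stream is created only when the deferred work actually runs.
        try (OutputStream out = streamSupplier.get()) {
            out.write(new byte[] {1, 2, 3});
        }
    }
}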
Use of org.apache.flink.util.function.SupplierWithException in project flink by apache.
The class StreamingSink, method compactionWriter.
/**
 * Create a file writer with compaction operators from the input stream. In addition, it can
 * emit {@link PartitionCommitInfo} downstream.
*/
public static <T> DataStream<PartitionCommitInfo> compactionWriter(
        ProviderContext providerContext,
        DataStream<T> inputStream,
        long bucketCheckInterval,
        StreamingFileSink.BucketsBuilder<T, String, ? extends StreamingFileSink.BucketsBuilder<T, String, ?>> bucketsBuilder,
        FileSystemFactory fsFactory,
        Path path,
        CompactReader.Factory<T> readFactory,
        long targetFileSize,
        int parallelism) {
    CompactFileWriter<T> writer = new CompactFileWriter<>(bucketCheckInterval, bucketsBuilder);
    SupplierWithException<FileSystem, IOException> fsSupplier =
            (SupplierWithException<FileSystem, IOException> & Serializable)
                    () -> fsFactory.create(path.toUri());
    CompactCoordinator coordinator = new CompactCoordinator(fsSupplier, targetFileSize);
    SingleOutputStreamOperator<CoordinatorOutput> coordinatorOp =
            inputStream
                    .transform("streaming-writer", TypeInformation.of(CoordinatorInput.class), writer)
                    .uid(providerContext.generateUid("streaming-writer").get())
                    .setParallelism(parallelism)
                    .transform("compact-coordinator", TypeInformation.of(CoordinatorOutput.class), coordinator)
                    .uid(providerContext.generateUid("compact-coordinator").get())
                    .setParallelism(1)
                    .setMaxParallelism(1);
    CompactWriter.Factory<T> writerFactory =
            CompactBucketWriter.factory(
                    (SupplierWithException<BucketWriter<T, String>, IOException> & Serializable)
                            bucketsBuilder::createBucketWriter);
    CompactOperator<T> compacter = new CompactOperator<>(fsSupplier, readFactory, writerFactory);
    return coordinatorOp
            .broadcast()
            .transform("compact-operator", TypeInformation.of(PartitionCommitInfo.class), compacter)
            .uid(providerContext.generateUid("compact-operator").get())
            .setParallelism(parallelism);
}
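Note the intersection cast (SupplierWithException<...> & Serializable): the suppliers are shipped inside operators, so they must be serializable, and a lambda is only serializable if its target type is. A minimal sketch of that idiom with purely illustrative names and return values (the class and method names below are assumptions, not Flink API):

import org.apache.flink.util.function.SupplierWithException;

import java.io.IOException;
import java.io.Serializable;

public class SerializableSupplierSketch {
    // The intersection cast mirrors the fsSupplier cast above; the returned String
    // is a stand-in for a real FileSystem handle.
    static SupplierWithException<String, IOException> fileSystemIdSupplier(String scheme) {
        return (SupplierWithException<String, IOException> & Serializable)
                () -> {
                    if (scheme == null) {
                        throw new IOException("No file system scheme configured");
                    }
                    return scheme + "://";
                };
    }

    public static void main(String[] args) throws IOException {
        System.out.println(fileSystemIdSupplier("hdfs").get()); // prints hdfs://
    }
}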
Use of org.apache.flink.util.function.SupplierWithException in project flink by apache.
The class ChannelPersistenceITCase, method collectBytes.
private <T> byte[] collectBytes(
        SupplierWithException<Optional<T>, Exception> entrySupplier,
        Function<T, Buffer> bufferExtractor) throws Exception {
    ArrayList<Buffer> buffers = new ArrayList<>();
    for (Optional<T> entry = entrySupplier.get(); entry.isPresent(); entry = entrySupplier.get()) {
        entry.map(bufferExtractor)
                .filter(buffer -> buffer.getDataType().isBuffer())
                .ifPresent(buffers::add);
    }
    ByteBuffer result =
            ByteBuffer.wrap(new byte[buffers.stream().mapToInt(Buffer::getSize).sum()]);
    buffers.forEach(buffer -> {
        result.put(buffer.getNioBufferReadable());
        buffer.recycleBuffer();
    });
    return result.array();
}
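Here the SupplierWithException<Optional<T>, Exception> is polled until it reports exhaustion with Optional.empty(). A minimal sketch of the same drain loop, with a queue standing in for the channel state reader (names and values are illustrative only):

import org.apache.flink.util.function.SupplierWithException;

import java.util.ArrayDeque;
import java.util.Arrays;
import java.util.Optional;
import java.util.Queue;

public class DrainSupplierSketch {
    public static void main(String[] args) throws Exception {
        // A queue stands in for the reader that collectBytes drains.
        Queue<Integer> source = new ArrayDeque<>(Arrays.asList(1, 2, 3));
        SupplierWithException<Optional<Integer>, Exception> entrySupplier =
                () -> Optional.ofNullable(source.poll());

        // Same pattern as above: poll until the supplier yields an empty Optional.
        int sum = 0;
        for (Optional<Integer> entry = entrySupplier.get();
                entry.isPresent();
                entry = entrySupplier.get()) {
            sum += entry.get();
        }
        System.out.println(sum); // 6
    }
}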
Use of org.apache.flink.util.function.SupplierWithException in project flink by apache.
The class ClassLoadingUtilsTest, method testRunSupplierWithContextClassLoader.
@Test
public void testRunSupplierWithContextClassLoader() throws Exception {
    SupplierWithException<ClassLoader, Exception> runnable =
            () -> Thread.currentThread().getContextClassLoader();
    final ClassLoader contextClassLoader =
            ClassLoadingUtils.runWithContextClassLoader(runnable, TEST_CLASS_LOADER);
    assertThat(contextClassLoader, is(TEST_CLASS_LOADER));
}
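The test asserts that the supplier runs with TEST_CLASS_LOADER installed as the thread's context class loader. A sketch of the contract being tested, as an illustrative re-implementation rather than Flink's actual ClassLoadingUtils:

import org.apache.flink.util.function.SupplierWithException;

public class ContextClassLoaderSketch {
    // Install the given class loader, run the supplier, then restore the previous
    // loader even if the supplier throws. This mirrors the behavior the test
    // asserts; it is not the Flink implementation.
    static <T, E extends Throwable> T runWithContextClassLoader(
            SupplierWithException<T, E> supplier, ClassLoader classLoader) throws E {
        final Thread thread = Thread.currentThread();
        final ClassLoader previous = thread.getContextClassLoader();
        thread.setContextClassLoader(classLoader);
        try {
            return supplier.get();
        } finally {
            thread.setContextClassLoader(previous);
        }
    }
}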
Use of org.apache.flink.util.function.SupplierWithException in project flink by apache.
The class CommonTestUtils, method waitForAllTaskRunning.
public static void waitForAllTaskRunning(
        SupplierWithException<AccessExecutionGraph, Exception> executionGraphSupplier,
        Deadline timeout,
        boolean allowFinished) throws Exception {
    Predicate<AccessExecutionVertex> subtaskPredicate = task -> {
        switch (task.getExecutionState()) {
            case RUNNING:
                return true;
            case FINISHED:
                if (allowFinished) {
                    return true;
                } else {
                    throw new RuntimeException("Sub-Task finished unexpectedly: " + task);
                }
            default:
                return false;
        }
    };
    waitUntilCondition(() -> {
        final AccessExecutionGraph graph = executionGraphSupplier.get();
        if (graph.getState().isGloballyTerminalState()) {
            final ErrorInfo failureInfo = graph.getFailureInfo();
            fail(format("Graph is in globally terminal state (%s)", graph.getState()),
                    failureInfo != null ? failureInfo.getException() : null);
        }
        return graph.getState() == JobStatus.RUNNING
                && graph.getAllVertices().values().stream()
                        .allMatch(jobVertex ->
                                Arrays.stream(jobVertex.getTaskVertices())
                                        .allMatch(subtaskPredicate));
    }, timeout);
}
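Both the execution graph lookup and the condition passed to waitUntilCondition are SupplierWithException instances, re-evaluated on every poll. A sketch of the underlying polling pattern, assuming illustrative names (the real CommonTestUtils helper differs in details such as retry interval and error reporting):

import org.apache.flink.util.function.SupplierWithException;

import java.time.Duration;
import java.util.concurrent.TimeoutException;

public class PollingSketch {
    // Evaluate a SupplierWithException<Boolean, Exception> until it is true or
    // the deadline passes; any exception from the condition propagates.
    static void waitUntil(SupplierWithException<Boolean, Exception> condition, Duration timeout)
            throws Exception {
        final long deadlineNanos = System.nanoTime() + timeout.toNanos();
        while (!condition.get()) {
            if (System.nanoTime() >= deadlineNanos) {
                throw new TimeoutException("Condition was not met before the timeout");
            }
            Thread.sleep(50);
        }
    }
}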