use of org.apache.flink.util.FlinkRuntimeException in project flink by apache.
the class ResultPartitionFactory method initializeBoundedBlockingPartitions.
private static void initializeBoundedBlockingPartitions(
        ResultSubpartition[] subpartitions,
        BoundedBlockingResultPartition parent,
        BoundedBlockingSubpartitionType blockingSubpartitionType,
        int networkBufferSize,
        FileChannelManager channelManager,
        boolean sslEnabled) {
    int i = 0;
    try {
        for (i = 0; i < subpartitions.length; i++) {
            final File spillFile = channelManager.createChannel().getPathFile();
            subpartitions[i] =
                    blockingSubpartitionType.create(
                            i, parent, spillFile, networkBufferSize, sslEnabled);
        }
    } catch (IOException e) {
        // undo all the work so that a failed constructor does not leave any resources
        // in need of disposal
        releasePartitionsQuietly(subpartitions, i);

        // wrapping the checked IOException in a runtime exception is unfortunate: the
        // constructors that call this method cannot pass the IOException on, which
        // is incompatible with eager initialization of resources (RAII).
        throw new FlinkRuntimeException(e);
    }
}
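The cleanup helper releasePartitionsQuietly is not included in this excerpt. A plausible sketch of what it does, assuming it releases only the first i subpartitions that were actually created and suppresses secondary failures so the original IOException is the one reported:

// Hypothetical sketch of the helper called above; the actual implementation
// in Flink may differ.
private static void releasePartitionsQuietly(ResultSubpartition[] partitions, int until) {
    for (int i = 0; i < until; i++) {
        final ResultSubpartition subpartition = partitions[i];
        try {
            subpartition.release();
        } catch (Exception e) {
            // suppress the secondary failure so the original IOException propagates
        }
    }
}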
use of org.apache.flink.util.FlinkRuntimeException in project flink by apache.
the class VertexFinishedStateCheckerTest method testRestoringPartiallyFinishedChainsFails.
private void testRestoringPartiallyFinishedChainsFails(boolean useUidHash) throws Exception {
    final JobVertexID jobVertexID1 = new JobVertexID();
    final JobVertexID jobVertexID2 = new JobVertexID();
    // op1 has a user-defined uidHash set.
    OperatorIDPair op1 = OperatorIDPair.of(new OperatorID(), new OperatorID());
    OperatorIDPair op2 = OperatorIDPair.generatedIDOnly(new OperatorID());
    OperatorIDPair op3 = OperatorIDPair.generatedIDOnly(new OperatorID());
    final ExecutionGraph graph =
            new CheckpointCoordinatorTestingUtils.CheckpointExecutionGraphBuilder()
                    .addJobVertex(jobVertexID2, 1, 1, singletonList(op3), true)
                    .addJobVertex(jobVertexID1, 1, 1, Arrays.asList(op1, op2), true)
                    .build();
    Map<OperatorID, OperatorState> operatorStates = new HashMap<>();
    operatorStates.put(
            useUidHash ? op1.getUserDefinedOperatorID().get() : op1.getGeneratedOperatorID(),
            new FullyFinishedOperatorState(op1.getGeneratedOperatorID(), 1, 1));
    operatorStates.put(
            op2.getGeneratedOperatorID(), new OperatorState(op2.getGeneratedOperatorID(), 1, 1));
    Set<ExecutionJobVertex> vertices = new HashSet<>();
    vertices.add(graph.getJobVertex(jobVertexID1));
    VertexFinishedStateChecker finishedStateChecker =
            new VertexFinishedStateChecker(vertices, operatorStates);
    FlinkRuntimeException exception =
            assertThrows(
                    FlinkRuntimeException.class,
                    finishedStateChecker::validateOperatorsFinishedState);
    assertThat(
            exception.getMessage(),
            is(
                    equalTo(
                            "Can not restore vertex anon(" + jobVertexID1 + ")"
                                    + " which contain mixed operator finished state: "
                                    + "[ALL_RUNNING, FULLY_FINISHED]")));
}
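For context on the two factory methods used above, a short illustration (not part of the test itself): OperatorIDPair.of carries both a generated ID and a user-defined ID (the result of setUidHash), while generatedIDOnly leaves the user-defined slot empty, which is what the useUidHash flag toggles between.

OperatorID generated = new OperatorID();
OperatorID userDefined = new OperatorID();

OperatorIDPair withUid = OperatorIDPair.of(generated, userDefined);
OperatorIDPair plain = OperatorIDPair.generatedIDOnly(generated);

withUid.getUserDefinedOperatorID();  // Optional.of(userDefined)
plain.getUserDefinedOperatorID();    // Optional.empty()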
use of org.apache.flink.util.FlinkRuntimeException in project flink by apache.
the class StreamingJobGraphGenerator method createJobVertex.
private StreamConfig createJobVertex(Integer streamNodeId, OperatorChainInfo chainInfo) {
    JobVertex jobVertex;
    StreamNode streamNode = streamGraph.getStreamNode(streamNodeId);

    byte[] hash = chainInfo.getHash(streamNodeId);
    if (hash == null) {
        throw new IllegalStateException(
                "Cannot find node hash. Did you generate them before calling this method?");
    }

    JobVertexID jobVertexId = new JobVertexID(hash);

    List<Tuple2<byte[], byte[]>> chainedOperators =
            chainInfo.getChainedOperatorHashes(streamNodeId);
    List<OperatorIDPair> operatorIDPairs = new ArrayList<>();
    if (chainedOperators != null) {
        for (Tuple2<byte[], byte[]> chainedOperator : chainedOperators) {
            OperatorID userDefinedOperatorID =
                    chainedOperator.f1 == null ? null : new OperatorID(chainedOperator.f1);
            operatorIDPairs.add(
                    OperatorIDPair.of(new OperatorID(chainedOperator.f0), userDefinedOperatorID));
        }
    }

    if (chainedInputOutputFormats.containsKey(streamNodeId)) {
        jobVertex =
                new InputOutputFormatVertex(
                        chainedNames.get(streamNodeId), jobVertexId, operatorIDPairs);
        chainedInputOutputFormats
                .get(streamNodeId)
                .write(new TaskConfig(jobVertex.getConfiguration()));
    } else {
        jobVertex = new JobVertex(chainedNames.get(streamNodeId), jobVertexId, operatorIDPairs);
    }

    for (OperatorCoordinator.Provider coordinatorProvider : chainInfo.getCoordinatorProviders()) {
        try {
            jobVertex.addOperatorCoordinator(new SerializedValue<>(coordinatorProvider));
        } catch (IOException e) {
            throw new FlinkRuntimeException(
                    String.format(
                            "Coordinator Provider for node %s is not serializable.",
                            chainedNames.get(streamNodeId)),
                    e);
        }
    }

    jobVertex.setResources(
            chainedMinResources.get(streamNodeId), chainedPreferredResources.get(streamNodeId));
    jobVertex.setInvokableClass(streamNode.getJobVertexClass());

    int parallelism = streamNode.getParallelism();
    if (parallelism > 0) {
        jobVertex.setParallelism(parallelism);
    } else {
        parallelism = jobVertex.getParallelism();
    }
    jobVertex.setMaxParallelism(streamNode.getMaxParallelism());

    if (LOG.isDebugEnabled()) {
        LOG.debug("Parallelism set: {} for {}", parallelism, streamNodeId);
    }

    jobVertices.put(streamNodeId, jobVertex);
    builtVertices.add(streamNodeId);
    jobGraph.addVertex(jobVertex);

    return new StreamConfig(jobVertex.getConfiguration());
}
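The try/catch around addOperatorCoordinator works because SerializedValue serializes its payload eagerly in the constructor. A minimal illustration of that round trip (not taken from the generator itself):

// construction serializes immediately, so a non-serializable payload fails
// here, at graph-building time, with an IOException
SerializedValue<String> value = new SerializedValue<>("payload");

// deserialization happens later, with whatever class loader the consumer supplies
String restored = value.deserializeValue(Thread.currentThread().getContextClassLoader());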
use of org.apache.flink.util.FlinkRuntimeException in project flink by apache.
the class StreamingJobGraphGenerator method configureCheckpointing.
private void configureCheckpointing() {
    CheckpointConfig cfg = streamGraph.getCheckpointConfig();

    long interval = cfg.getCheckpointInterval();
    if (interval < MINIMAL_CHECKPOINT_TIME) {
        // an interval of Long.MAX_VALUE disables periodic checkpointing
        interval = Long.MAX_VALUE;
    }

    // --- configure options ---

    CheckpointRetentionPolicy retentionAfterTermination;
    if (cfg.isExternalizedCheckpointsEnabled()) {
        CheckpointConfig.ExternalizedCheckpointCleanup cleanup =
                cfg.getExternalizedCheckpointCleanup();
        // sanity check
        if (cleanup == null) {
            throw new IllegalStateException(
                    "Externalized checkpoints enabled, but no cleanup mode configured.");
        }
        retentionAfterTermination =
                cleanup.deleteOnCancellation()
                        ? CheckpointRetentionPolicy.RETAIN_ON_FAILURE
                        : CheckpointRetentionPolicy.RETAIN_ON_CANCELLATION;
    } else {
        retentionAfterTermination = CheckpointRetentionPolicy.NEVER_RETAIN_AFTER_TERMINATION;
    }

    // --- configure the master-side checkpoint hooks ---

    final ArrayList<MasterTriggerRestoreHook.Factory> hooks = new ArrayList<>();
    for (StreamNode node : streamGraph.getStreamNodes()) {
        if (node.getOperatorFactory() instanceof UdfStreamOperatorFactory) {
            Function f = ((UdfStreamOperatorFactory) node.getOperatorFactory()).getUserFunction();
            if (f instanceof WithMasterCheckpointHook) {
                hooks.add(new FunctionMasterCheckpointHookFactory((WithMasterCheckpointHook<?>) f));
            }
        }
    }

    // because the hooks can have user-defined code, they need to be stored as
    // eagerly serialized values
    final SerializedValue<MasterTriggerRestoreHook.Factory[]> serializedHooks;
    if (hooks.isEmpty()) {
        serializedHooks = null;
    } else {
        try {
            MasterTriggerRestoreHook.Factory[] asArray =
                    hooks.toArray(new MasterTriggerRestoreHook.Factory[hooks.size()]);
            serializedHooks = new SerializedValue<>(asArray);
        } catch (IOException e) {
            throw new FlinkRuntimeException("Trigger/restore hook is not serializable", e);
        }
    }

    // because the state backend can have user-defined code, it needs to be stored as
    // an eagerly serialized value
    final SerializedValue<StateBackend> serializedStateBackend;
    if (streamGraph.getStateBackend() == null) {
        serializedStateBackend = null;
    } else {
        try {
            serializedStateBackend = new SerializedValue<>(streamGraph.getStateBackend());
        } catch (IOException e) {
            throw new FlinkRuntimeException("State backend is not serializable", e);
        }
    }

    // because the checkpoint storage can have user-defined code, it needs to be stored as
    // an eagerly serialized value
    final SerializedValue<CheckpointStorage> serializedCheckpointStorage;
    if (streamGraph.getCheckpointStorage() == null) {
        serializedCheckpointStorage = null;
    } else {
        try {
            serializedCheckpointStorage = new SerializedValue<>(streamGraph.getCheckpointStorage());
        } catch (IOException e) {
            throw new FlinkRuntimeException("Checkpoint storage is not serializable", e);
        }
    }

    // --- done, put it all together ---
    JobCheckpointingSettings settings =
            new JobCheckpointingSettings(
                    CheckpointCoordinatorConfiguration.builder()
                            .setCheckpointInterval(interval)
                            .setCheckpointTimeout(cfg.getCheckpointTimeout())
                            .setMinPauseBetweenCheckpoints(cfg.getMinPauseBetweenCheckpoints())
                            .setMaxConcurrentCheckpoints(cfg.getMaxConcurrentCheckpoints())
                            .setCheckpointRetentionPolicy(retentionAfterTermination)
                            .setExactlyOnce(getCheckpointingMode(cfg) == CheckpointingMode.EXACTLY_ONCE)
                            .setTolerableCheckpointFailureNumber(cfg.getTolerableCheckpointFailureNumber())
                            .setUnalignedCheckpointsEnabled(cfg.isUnalignedCheckpointsEnabled())
                            .setCheckpointIdOfIgnoredInFlightData(cfg.getCheckpointIdOfIgnoredInFlightData())
                            .setAlignedCheckpointTimeout(cfg.getAlignedCheckpointTimeout().toMillis())
                            .setEnableCheckpointsAfterTasksFinish(streamGraph.isEnableCheckpointsAfterTasksFinish())
                            .build(),
                    serializedStateBackend,
                    streamGraph.isChangelogStateBackendEnabled(),
                    serializedCheckpointStorage,
                    serializedHooks);

    jobGraph.setSnapshotSettings(settings);
}
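For reference, a minimal sketch of the user-facing configuration these settings are derived from; the interval and cleanup mode are example values, not defaults:

StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
// a large enough interval keeps periodic checkpointing enabled
env.enableCheckpointing(60_000L);
// deleteOnCancellation() is false for this mode, so the retention logic
// above maps it to CheckpointRetentionPolicy.RETAIN_ON_CANCELLATION
env.getCheckpointConfig()
        .enableExternalizedCheckpoints(
                CheckpointConfig.ExternalizedCheckpointCleanup.RETAIN_ON_CANCELLATION);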
use of org.apache.flink.util.FlinkRuntimeException in project flink by apache.
the class MockKeyedStateBackend method createInternalState.
@Override
@SuppressWarnings("unchecked")
@Nonnull
public <N, SV, SEV, S extends State, IS extends S> IS createInternalState(
        @Nonnull TypeSerializer<N> namespaceSerializer,
        @Nonnull StateDescriptor<S, SV> stateDesc,
        @Nonnull StateSnapshotTransformFactory<SEV> snapshotTransformFactory)
        throws Exception {
    StateFactory stateFactory = STATE_FACTORIES.get(stateDesc.getType());
    if (stateFactory == null) {
        String message =
                String.format(
                        "State %s is not supported by %s",
                        stateDesc.getClass(), TtlStateFactory.class);
        throw new FlinkRuntimeException(message);
    }
    IS state = stateFactory.createInternalState(namespaceSerializer, stateDesc);
    stateSnapshotFilters.put(
            stateDesc.getName(),
            (StateSnapshotTransformer<Object>)
                    getStateSnapshotTransformer(stateDesc, snapshotTransformFactory));
    ((MockInternalKvState<K, N, SV>) state).values =
            () ->
                    stateValues
                            .computeIfAbsent(stateDesc.getName(), n -> new HashMap<>())
                            .computeIfAbsent(getCurrentKey(), k -> new HashMap<>());
    return state;
}
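The STATE_FACTORIES lookup is a plain dispatch-by-descriptor-type table. A self-contained sketch of the pattern, with an illustrative enum and factory interface standing in for the actual MockKeyedStateBackend members:

import java.util.EnumMap;
import java.util.Map;

// Kind stands in for StateDescriptor.Type; the entries are illustrative.
enum Kind { VALUE, LIST, MAP }

interface Factory {
    Object create(String name);
}

class StateFactoryDispatch {
    private static final Map<Kind, Factory> FACTORIES = new EnumMap<>(Kind.class);

    static {
        FACTORIES.put(Kind.VALUE, name -> "value-state:" + name);
        FACTORIES.put(Kind.LIST, name -> "list-state:" + name);
        // Kind.MAP is deliberately absent: requesting it takes the error branch below
    }

    static Object create(Kind kind, String name) {
        Factory factory = FACTORIES.get(kind);
        if (factory == null) {
            // mirrors the FlinkRuntimeException branch in createInternalState
            throw new RuntimeException(String.format("State %s is not supported", kind));
        }
        return factory.create(name);
    }
}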