Use of org.apache.flink.util.FlinkRuntimeException in project flink by apache.
The class HadoopRecoverableFsDataOutputStream, method ensureTruncateInitialized.
private static void ensureTruncateInitialized() throws FlinkRuntimeException {
    if (HadoopUtils.isMinHadoopVersion(2, 7) && truncateHandle == null) {
        Method truncateMethod;
        try {
            truncateMethod = FileSystem.class.getMethod("truncate", Path.class, long.class);
        } catch (NoSuchMethodException e) {
            throw new FlinkRuntimeException(
                    "Could not find a public truncate method on the Hadoop File System.");
        }

        if (!Modifier.isPublic(truncateMethod.getModifiers())) {
            throw new FlinkRuntimeException(
                    "Could not find a public truncate method on the Hadoop File System.");
        }

        truncateHandle = truncateMethod;
    }
}
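The cached truncateHandle is later invoked reflectively. The helper below is a rough sketch (TruncateInvoker and invokeTruncate are illustrative names, not part of the Flink class) of how such a reflective call can surface failures as a FlinkRuntimeException:

import java.lang.reflect.InvocationTargetException;
import java.lang.reflect.Method;

import org.apache.flink.util.FlinkRuntimeException;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

final class TruncateInvoker {

    // hypothetical wrapper around a reflectively resolved FileSystem#truncate handle
    static boolean invokeTruncate(Method truncateHandle, FileSystem fs, Path file, long length) {
        try {
            return (Boolean) truncateHandle.invoke(fs, file, length);
        } catch (InvocationTargetException e) {
            // surface the exception thrown by the underlying truncate call
            throw new FlinkRuntimeException("Truncating " + file + " failed.", e.getTargetException());
        } catch (IllegalAccessException e) {
            throw new FlinkRuntimeException("Could not invoke truncate reflectively.", e);
        }
    }
}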
Use of org.apache.flink.util.FlinkRuntimeException in project flink by apache.
The class VertexFinishedStateChecker, method checkPredecessorsOfPartiallyFinishedVertex.
private void checkPredecessorsOfPartiallyFinishedVertex(
        ExecutionJobVertex vertex, VerticesFinishedStatusCache verticesFinishedStatusCache) {
    // Computes the distribution pattern from each predecessor. If there are multiple edges
    // from a single predecessor, ALL_TO_ALL edges would have a higher priority since it
    // implies stricter limitation (must be fully finished).
    Map<JobVertexID, DistributionPattern> predecessorDistribution = new HashMap<>();
    for (JobEdge jobEdge : vertex.getJobVertex().getInputs()) {
        predecessorDistribution.compute(
                jobEdge.getSource().getProducer().getID(),
                (k, v) -> v == DistributionPattern.ALL_TO_ALL ? v : jobEdge.getDistributionPattern());
    }

    for (IntermediateResult dataset : vertex.getInputs()) {
        ExecutionJobVertex predecessor = dataset.getProducer();
        VertexFinishedState predecessorState = verticesFinishedStatusCache.getOrUpdate(predecessor);
        DistributionPattern distribution = predecessorDistribution.get(predecessor.getJobVertexId());

        if (distribution == DistributionPattern.ALL_TO_ALL
                && predecessorState != VertexFinishedState.FULLY_FINISHED) {
            throw new FlinkRuntimeException(
                    "Illegal JobGraph modification. Cannot run a program with partially finished"
                            + " vertices predeceased with running or partially finished ones and"
                            + " connected via the ALL_TO_ALL edges. Task vertex "
                            + vertex.getName() + "(" + vertex.getJobVertexId() + ")" + " has a "
                            + (predecessorState == VertexFinishedState.ALL_RUNNING
                                    ? "all running" : "partially finished")
                            + " predecessor");
        } else if (distribution == DistributionPattern.POINTWISE
                && predecessorState == VertexFinishedState.ALL_RUNNING) {
            throw new FlinkRuntimeException(
                    "Illegal JobGraph modification. Cannot run a program with partially finished"
                            + " vertices predeceased with all running ones. Task vertex "
                            + vertex.getName() + "(" + vertex.getJobVertexId() + ")"
                            + " has a all running predecessor");
        }
    }
}
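The Map.compute call above gives ALL_TO_ALL precedence once it has been recorded for a producer, even if later edges from the same producer are POINTWISE. The standalone snippet below (a made-up demo using string keys, not Flink code) illustrates that merge rule:

import java.util.HashMap;
import java.util.Map;

import org.apache.flink.runtime.jobgraph.DistributionPattern;

public class DistributionMergeDemo {
    public static void main(String[] args) {
        Map<String, DistributionPattern> byProducer = new HashMap<>();

        // three edges from the same producer: POINTWISE, then ALL_TO_ALL, then POINTWISE again
        DistributionPattern[] edges = {
            DistributionPattern.POINTWISE, DistributionPattern.ALL_TO_ALL, DistributionPattern.POINTWISE
        };
        for (DistributionPattern edgePattern : edges) {
            byProducer.compute(
                    "producer-1",
                    (k, v) -> v == DistributionPattern.ALL_TO_ALL ? v : edgePattern);
        }

        // ALL_TO_ALL wins because it implies the stricter requirement
        System.out.println(byProducer.get("producer-1")); // prints ALL_TO_ALL
    }
}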
Use of org.apache.flink.util.FlinkRuntimeException in project flink by apache.
The class FileRegionBuffer, method getNioBufferReadable.
/**
 * This method is only called by tests and by event-deserialization, like checkpoint barriers.
 * Because such events are not used for bounded intermediate results, this method currently
 * executes only in tests.
 */
@Override
public ByteBuffer getNioBufferReadable() {
    try {
        final ByteBuffer buffer = ByteBuffer.allocateDirect(bufferSize());
        BufferReaderWriterUtil.readByteBufferFully(fileChannel, buffer, position());
        buffer.flip();
        return buffer;
    } catch (IOException e) {
        // this is not very pretty, but the method signature does not allow us
        // to declare IOExceptions, as would be necessary for a proper "lazy buffer".
        throw new FlinkRuntimeException(e.getMessage(), e);
    }
}
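Because getNioBufferReadable() cannot declare checked exceptions, the IOException is wrapped in an unchecked FlinkRuntimeException. A caller that needs the original IOException back can unwrap the cause; the helper below is a hypothetical sketch (BufferReadHelper is not part of Flink):

import java.io.IOException;
import java.nio.ByteBuffer;

import org.apache.flink.runtime.io.network.buffer.Buffer;
import org.apache.flink.util.FlinkRuntimeException;

final class BufferReadHelper {

    // hypothetical caller-side helper: restores the checked IOException that
    // was wrapped into a FlinkRuntimeException
    static ByteBuffer readNioBuffer(Buffer buffer) throws IOException {
        try {
            return buffer.getNioBufferReadable();
        } catch (FlinkRuntimeException e) {
            if (e.getCause() instanceof IOException) {
                throw (IOException) e.getCause();
            }
            throw e;
        }
    }
}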
Use of org.apache.flink.util.FlinkRuntimeException in project flink by apache.
The class XaFacadeImpl, method execute.
private <T> T execute(Command<T> cmd) throws FlinkRuntimeException {
    Preconditions.checkState(isOpen(), "not connected");
    LOG.debug("{}, xid={}", cmd.name, cmd.xid);
    try {
        T result = cmd.callable.call();
        LOG.trace("{} succeeded , xid={}", cmd.name, cmd.xid);
        return result;
    } catch (XAException e) {
        if (HEUR_ERR_CODES.contains(e.errorCode)) {
            cmd.xid.ifPresent(this::forget);
        }
        return cmd.recover.apply(e).orElseThrow(() -> wrapException(cmd.name, cmd.xid, e));
    } catch (FlinkRuntimeException e) {
        throw e;
    } catch (Exception e) {
        throw wrapException(cmd.name, cmd.xid, e);
    }
}
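The wrapException helper is defined elsewhere in XaFacadeImpl. The sketch below is only a hypothetical approximation of such a helper (class name, method name, and message format are assumptions, not the actual implementation), showing how the command name, xid, and XA error code could be folded into a FlinkRuntimeException:

import java.util.Optional;

import javax.transaction.xa.XAException;
import javax.transaction.xa.Xid;

import org.apache.flink.util.FlinkRuntimeException;

final class XaErrors {

    // hypothetical wrapException-style helper: turns a failed XA command into an
    // unchecked FlinkRuntimeException carrying the command name and xid
    static FlinkRuntimeException wrap(String action, Optional<Xid> xid, Exception ex) {
        String context = action + xid.map(x -> ", xid=" + x).orElse("");
        if (ex instanceof XAException) {
            XAException xa = (XAException) ex;
            return new FlinkRuntimeException(
                    "unable to " + context + ", XA error code: " + xa.errorCode, ex);
        }
        return new FlinkRuntimeException("unable to " + context, ex);
    }
}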
Use of org.apache.flink.util.FlinkRuntimeException in project flink by apache.
The class KubernetesMultipleComponentLeaderElectionHaServices, method getOrInitializeSingleLeaderElectionService.
private DefaultMultipleComponentLeaderElectionService getOrInitializeSingleLeaderElectionService() {
    synchronized (lock) {
        if (multipleComponentLeaderElectionService == null) {
            try {
                final KubernetesLeaderElectionConfiguration leaderElectionConfiguration =
                        new KubernetesLeaderElectionConfiguration(
                                getClusterConfigMap(), lockIdentity, configuration);
                multipleComponentLeaderElectionService =
                        new DefaultMultipleComponentLeaderElectionService(
                                fatalErrorHandler,
                                new KubernetesMultipleComponentLeaderElectionDriverFactory(
                                        kubeClient,
                                        leaderElectionConfiguration,
                                        configMapSharedWatcher,
                                        watchExecutorService,
                                        fatalErrorHandler));
            } catch (Exception e) {
                throw new FlinkRuntimeException(
                        "Could not initialize the default single leader election service.", e);
            }
        }
        return multipleComponentLeaderElectionService;
    }
}
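The method follows a common lazy-initialization-under-lock pattern, wrapping any construction failure in a FlinkRuntimeException. The generic sketch below (LazyService is an illustrative class, not part of Flink) shows the same pattern in isolation:

import java.util.function.Supplier;

import org.apache.flink.util.FlinkRuntimeException;

// construct a service at most once under a lock; wrap construction failures
final class LazyService<T> {

    private final Object lock = new Object();
    private final Supplier<T> factory;

    private T instance; // guarded by lock

    LazyService(Supplier<T> factory) {
        this.factory = factory;
    }

    T get() {
        synchronized (lock) {
            if (instance == null) {
                try {
                    instance = factory.get();
                } catch (Exception e) {
                    throw new FlinkRuntimeException("Could not initialize the service.", e);
                }
            }
            return instance;
        }
    }
}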