Example use of org.apache.flink.util.FlinkRuntimeException in the Apache Flink project: class ResourceManagerJobMasterTest, method createAndStartResourceManagerService.
/**
 * Creates and starts a {@link TestingResourceManagerService}, grants it leadership, and blocks
 * (up to {@code TIMEOUT}) until the resource manager gateway becomes available.
 *
 * @throws Exception if starting the service fails or leadership confirmation times out
 */
private void createAndStartResourceManagerService() throws Exception {
    final TestingLeaderElectionService leaderElectionService = new TestingLeaderElectionService();
    resourceManagerService = TestingResourceManagerService.newBuilder().setRpcService(rpcService).setJmLeaderRetrieverFunction(requestedJobId -> {
        if (requestedJobId.equals(jobId)) {
            return jobMasterLeaderRetrievalService;
        } else {
            // Report the id that was actually requested; previously this formatted the known
            // jobId, so the message never showed the offending value.
            throw new FlinkRuntimeException(String.format("Unknown job id %s", requestedJobId));
        }
    }).setRmLeaderElectionService(leaderElectionService).build();
    resourceManagerService.start();
    // Grant leadership with a fresh session id.
    resourceManagerService.isLeader(UUID.randomUUID());
    // Once leadership is confirmed, the gateway must be present; otherwise fail the test.
    leaderElectionService.getConfirmationFuture().thenRun(() -> {
        resourceManagerGateway = resourceManagerService.getResourceManagerGateway().orElseThrow(() -> new AssertionError("RM not available after confirming leadership."));
    }).get(TIMEOUT.getSize(), TIMEOUT.getUnit());
}
Example use of org.apache.flink.util.FlinkRuntimeException in the Apache Flink project: class RestServerEndpoint, method checkAllEndpointsAndHandlersAreUnique.
/**
 * Validates the handler registrations: (1) each handler instance is registered at most once,
 * and (2) at most one handler is registered per endpoint, where an endpoint is identified by
 * (version, method, url).
 *
 * <p>Check (1) is technically redundant — a duplicate instance also carries duplicate headers
 * and would trip check (2) — but it yields a clearer error message.
 *
 * @param handlers the registered (specification, handler) pairs to validate
 * @throws FlinkRuntimeException if a duplicate handler instance or overlapping endpoint is found
 */
private static void checkAllEndpointsAndHandlersAreUnique(final List<Tuple2<RestHandlerSpecification, ChannelInboundHandler>> handlers) {
    final Set<String> seenEndpoints = new HashSet<>();
    // Identity-based set: equality of handler objects is irrelevant, only instance reuse is.
    final Set<ChannelInboundHandler> seenInstances = Collections.newSetFromMap(new IdentityHashMap<>());
    for (Tuple2<RestHandlerSpecification, ChannelInboundHandler> registration : handlers) {
        if (!seenInstances.add(registration.f1)) {
            throw new FlinkRuntimeException("Duplicate REST handler instance found." + " Please ensure each instance is registered only once.");
        }
        final RestHandlerSpecification spec = registration.f0;
        for (RestAPIVersion version : spec.getSupportedAPIVersions()) {
            // Collapse path-parameter names to a single token: endpoints differing only in the
            // parameter name would still clash at runtime.
            final String endpointKey = (version.toString() + spec.getHttpMethod() + spec.getTargetRestEndpointURL()).replaceAll(":[\\w-]+", ":param");
            if (!seenEndpoints.add(endpointKey)) {
                throw new FlinkRuntimeException(String.format("REST handler registration overlaps with another registration for: version=%s, method=%s, url=%s.", version, spec.getHttpMethod(), spec.getTargetRestEndpointURL()));
            }
        }
    }
}
Example use of org.apache.flink.util.FlinkRuntimeException in the Apache Flink project: class SerializedJobExecutionResultTest, method testSerialization.
/**
 * Round-trips a {@code SerializedJobExecutionResult} through Java serialization and verifies
 * that the clone and both converted {@code JobExecutionResult}s expose the original job id,
 * runtime, and accumulators (including a failed accumulator that must rethrow on access).
 */
@Test
public void testSerialization() throws Exception {
    final ClassLoader classLoader = getClass().getClassLoader();
    final JobID jobId = new JobID();
    final long netRuntime = 65927436589267L;
    final Map<String, SerializedValue<OptionalFailure<Object>>> accumulators = new HashMap<>();
    accumulators.put("name1", new SerializedValue<>(OptionalFailure.of(723L)));
    accumulators.put("name2", new SerializedValue<>(OptionalFailure.of("peter")));
    accumulators.put("name3", new SerializedValue<>(OptionalFailure.ofFailure(new ExpectedTestException())));
    final SerializedJobExecutionResult original = new SerializedJobExecutionResult(jobId, netRuntime, accumulators);
    // Serialize and deserialize; the clone must match field by field.
    final SerializedJobExecutionResult cloned = CommonTestUtils.createCopySerializable(original);
    assertEquals(jobId, cloned.getJobId());
    assertEquals(netRuntime, cloned.getNetRuntime());
    assertEquals(netRuntime, cloned.getNetRuntime(TimeUnit.MILLISECONDS));
    assertEquals(accumulators, cloned.getSerializedAccumulatorResults());
    // Conversion must be repeatable on the same instance without corrupting state.
    final JobExecutionResult converted = original.toJobExecutionResult(classLoader);
    final JobExecutionResult convertedAgain = original.toJobExecutionResult(classLoader);
    assertEquals(jobId, converted.getJobID());
    assertEquals(jobId, convertedAgain.getJobID());
    assertEquals(netRuntime, converted.getNetRuntime());
    assertEquals(netRuntime, converted.getNetRuntime(TimeUnit.MILLISECONDS));
    assertEquals(netRuntime, convertedAgain.getNetRuntime());
    assertEquals(netRuntime, convertedAgain.getNetRuntime(TimeUnit.MILLISECONDS));
    for (Map.Entry<String, SerializedValue<OptionalFailure<Object>>> entry : accumulators.entrySet()) {
        final String name = entry.getKey();
        final OptionalFailure<Object> expected = entry.getValue().deserializeValue(classLoader);
        if (expected.isFailure()) {
            // Failed accumulators must rethrow the original failure on access.
            assertAccumulatorLookupFails(converted, name);
            assertAccumulatorLookupFails(convertedAgain, name);
        } else {
            assertEquals(expected.get(), converted.getAccumulatorResult(name));
            assertEquals(expected.get(), convertedAgain.getAccumulatorResult(name));
        }
    }
}

/** Asserts that fetching the named accumulator throws, with ExpectedTestException as the cause. */
private static void assertAccumulatorLookupFails(JobExecutionResult result, String name) {
    try {
        result.getAccumulatorResult(name);
        fail("expected failure");
    } catch (FlinkRuntimeException ex) {
        assertTrue(ExceptionUtils.findThrowable(ex, ExpectedTestException.class).isPresent());
    }
}
Example use of org.apache.flink.util.FlinkRuntimeException in the Apache Flink project: class SubtaskExecutionAttemptAccumulatorsHandlerTest, method testHandleRequest.
/**
 * Verifies that the handler turns an archived execution's stringified accumulators into the
 * expected {@code SubtaskExecutionAttemptAccumulatorsInfo} response.
 */
@Test
public void testHandleRequest() throws Exception {
    // Build the handler under test.
    final RestHandlerConfiguration restHandlerConfiguration = RestHandlerConfiguration.fromConfiguration(new Configuration());
    final SubtaskExecutionAttemptAccumulatorsHandler handler = new SubtaskExecutionAttemptAccumulatorsHandler(
            () -> null,
            Time.milliseconds(100L),
            Collections.emptyMap(),
            SubtaskExecutionAttemptAccumulatorsHeaders.getInstance(),
            new DefaultExecutionGraphCache(restHandlerConfiguration.getTimeout(), Time.milliseconds(restHandlerConfiguration.getRefreshInterval())),
            TestingUtils.defaultExecutor());
    // An empty request suffices; all data comes from the archived execution below.
    final HandlerRequest<EmptyRequestBody> request = HandlerRequest.create(EmptyRequestBody.getInstance(), new SubtaskAttemptMessageParameters());
    // Accumulators of mixed types, including one that failed.
    final Map<String, OptionalFailure<Accumulator<?, ?>>> userAccumulators = new HashMap<>(3);
    userAccumulators.put("IntCounter", OptionalFailure.of(new IntCounter(10)));
    userAccumulators.put("LongCounter", OptionalFailure.of(new LongCounter(100L)));
    userAccumulators.put("Failure", OptionalFailure.ofFailure(new FlinkRuntimeException("Test")));
    final StringifiedAccumulatorResult[] stringifiedResults = StringifiedAccumulatorResult.stringifyAccumulatorResults(userAccumulators);
    final int attemptNumber = 1;
    final int subtaskIdx = 2;
    // Archived execution carrying the accumulators that the handler serves.
    final ArchivedExecution execution = new ArchivedExecution(stringifiedResults, null, new ExecutionAttemptID(), attemptNumber, ExecutionState.FINISHED, null, null, null, subtaskIdx, new long[ExecutionState.values().length]);
    // Invoke the tested method.
    final SubtaskExecutionAttemptAccumulatorsInfo actual = handler.handleRequest(request, execution);
    // Expected: one UserAccumulator per stringified result, in the same order.
    final ArrayList<UserAccumulator> expectedAccumulators = new ArrayList<>(userAccumulators.size());
    for (StringifiedAccumulatorResult stringified : stringifiedResults) {
        expectedAccumulators.add(new UserAccumulator(stringified.getName(), stringified.getType(), stringified.getValue()));
    }
    final SubtaskExecutionAttemptAccumulatorsInfo expected = new SubtaskExecutionAttemptAccumulatorsInfo(subtaskIdx, attemptNumber, execution.getAttemptId().toString(), expectedAccumulators);
    assertEquals(expected, actual);
}
Example use of org.apache.flink.util.FlinkRuntimeException in the Apache Flink project: class CheckpointFailureManager, method checkFailureAgainstCounter.
/**
 * Accounts the given checkpoint failure against the continuous-failure counter and, once the
 * tolerable number of failures is exceeded, resets the counter and notifies the error handler.
 *
 * <p>Failures of checkpoints at or before the last succeeded one are ignored (a newer
 * checkpoint already completed), except when the checkpoint id is unknown.
 *
 * @param exception the checkpoint failure to account for
 * @param checkpointId id of the failed checkpoint, or {@code UNKNOWN_CHECKPOINT_ID}
 * @param errorHandler invoked with a {@link FlinkRuntimeException} once the threshold is exceeded
 */
private void checkFailureAgainstCounter(CheckpointException exception, long checkpointId, Consumer<FlinkRuntimeException> errorHandler) {
    final boolean countsAgainstThreshold = checkpointId == UNKNOWN_CHECKPOINT_ID || checkpointId > lastSucceededCheckpointId;
    if (!countsAgainstThreshold) {
        return;
    }
    checkFailureCounter(exception, checkpointId);
    if (continuousFailureCounter.get() > tolerableCpFailureNumber) {
        clearCount();
        errorHandler.accept(new FlinkRuntimeException(EXCEEDED_CHECKPOINT_TOLERABLE_FAILURE_MESSAGE));
    }
}
Aggregations