Use of org.apache.flink.runtime.messages.JobManagerMessages.JobFound in project flink by apache.
Example from the class AbstractQueryableStateITCase, method testDuplicateRegistrationFailsJob.
/**
 * Tests that duplicate query registrations fail the job at the JobManager.
 */
@Test
public void testDuplicateRegistrationFailsJob() throws Exception {
    // Config
    final Deadline deadline = TEST_TIMEOUT.fromNow();
    final int numKeys = 256;

    JobID jobId = null;
    try {
        //
        // Test program
        //
        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
        env.setStateBackend(stateBackend);
        env.setParallelism(NUM_SLOTS);
        // Very important, because the cluster is shared between tests and we
        // don't explicitly check that all slots are available before
        // submitting.
        env.setRestartStrategy(RestartStrategies.fixedDelayRestart(Integer.MAX_VALUE, 1000));

        DataStream<Tuple2<Integer, Long>> source = env.addSource(new TestKeyRangeSource(numKeys));

        // Reducing state
        ReducingStateDescriptor<Tuple2<Integer, Long>> reducingState = new ReducingStateDescriptor<>(
                "any-name",
                new SumReduce(),
                source.getType());

        final String queryName = "duplicate-me";

        final QueryableStateStream<Integer, Tuple2<Integer, Long>> queryableState = source
                .keyBy(new KeySelector<Tuple2<Integer, Long>, Integer>() {
                    @Override
                    public Integer getKey(Tuple2<Integer, Long> value) throws Exception {
                        return value.f0;
                    }
                })
                .asQueryableState(queryName, reducingState);

        final QueryableStateStream<Integer, Tuple2<Integer, Long>> duplicate = source
                .keyBy(new KeySelector<Tuple2<Integer, Long>, Integer>() {
                    @Override
                    public Integer getKey(Tuple2<Integer, Long> value) throws Exception {
                        return value.f0;
                    }
                })
                .asQueryableState(queryName);

        // Submit the job graph
        JobGraph jobGraph = env.getStreamGraph().getJobGraph();
        jobId = jobGraph.getJobID();

        // Register for a notification when the job reaches the FAILED state
        Future<JobStatusIs> failedFuture = cluster
                .getLeaderGateway(deadline.timeLeft())
                .ask(new NotifyWhenJobStatus(jobId, JobStatus.FAILED), deadline.timeLeft())
                .mapTo(ClassTag$.MODULE$.<JobStatusIs>apply(JobStatusIs.class));

        cluster.submitJobDetached(jobGraph);

        JobStatusIs jobStatus = Await.result(failedFuture, deadline.timeLeft());
        assertEquals(JobStatus.FAILED, jobStatus.state());

        // Get the job and check the cause
        JobFound jobFound = Await.result(
                cluster.getLeaderGateway(deadline.timeLeft())
                        .ask(new JobManagerMessages.RequestJob(jobId), deadline.timeLeft())
                        .mapTo(ClassTag$.MODULE$.<JobFound>apply(JobFound.class)),
                deadline.timeLeft());

        String failureCause = jobFound.executionGraph().getFailureCauseAsString();

        assertTrue("Not instance of SuppressRestartsException",
                failureCause.startsWith("org.apache.flink.runtime.execution.SuppressRestartsException"));
        int causedByIndex = failureCause.indexOf("Caused by: ");
        String subFailureCause = failureCause.substring(causedByIndex + "Caused by: ".length());
        assertTrue("Not caused by IllegalStateException",
                subFailureCause.startsWith("java.lang.IllegalStateException"));
        assertTrue("Exception does not contain registration name",
                subFailureCause.contains(queryName));
    } finally {
        // Free cluster resources
        if (jobId != null) {
            Future<CancellationSuccess> cancellation = cluster
                    .getLeaderGateway(deadline.timeLeft())
                    .ask(new JobManagerMessages.CancelJob(jobId), deadline.timeLeft())
                    .mapTo(ClassTag$.MODULE$.<CancellationSuccess>apply(CancellationSuccess.class));
            Await.ready(cancellation, deadline.timeLeft());
        }
    }
}
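
The JobFound lookup in the test is an instance of the leader-gateway ask pattern: send a JobManagerMessages.RequestJob to the JobManager, narrow the resulting Scala future to JobFound with mapTo, and block with Await until the deadline runs out. As a minimal sketch, the same pattern could be pulled into a small helper on the test class (the helper name requestJob is illustrative, not part of Flink):

private JobFound requestJob(JobID jobId, Deadline deadline) throws Exception {
    // Ask the leading JobManager for the job; the reply arrives as a Scala future
    // that is narrowed to JobManagerMessages.JobFound via mapTo.
    Future<JobFound> future = cluster
            .getLeaderGateway(deadline.timeLeft())
            .ask(new JobManagerMessages.RequestJob(jobId), deadline.timeLeft())
            .mapTo(ClassTag$.MODULE$.<JobFound>apply(JobFound.class));

    // Block until the JobManager answers or the deadline expires.
    return Await.result(future, deadline.timeLeft());
}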