
Example 1 with JobFound

Use of org.apache.flink.runtime.messages.JobManagerMessages.JobFound in project flink by apache.

From class AbstractQueryableStateITCase, method testDuplicateRegistrationFailsJob:

/**
 * Tests that duplicate query registrations fail the job at the JobManager.
 */
@Test
public void testDuplicateRegistrationFailsJob() throws Exception {
    // Config
    final Deadline deadline = TEST_TIMEOUT.fromNow();
    final int numKeys = 256;
    JobID jobId = null;
    try {
        //
        // Test program
        //
        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
        env.setStateBackend(stateBackend);
        env.setParallelism(NUM_SLOTS);
        // Very important, because cluster is shared between tests and we
        // don't explicitly check that all slots are available before
        // submitting.
        env.setRestartStrategy(RestartStrategies.fixedDelayRestart(Integer.MAX_VALUE, 1000));
        DataStream<Tuple2<Integer, Long>> source = env.addSource(new TestKeyRangeSource(numKeys));
        // Reducing state
        ReducingStateDescriptor<Tuple2<Integer, Long>> reducingState = new ReducingStateDescriptor<>("any-name", new SumReduce(), source.getType());
        final String queryName = "duplicate-me";
        final QueryableStateStream<Integer, Tuple2<Integer, Long>> queryableState = source.keyBy(new KeySelector<Tuple2<Integer, Long>, Integer>() {

            @Override
            public Integer getKey(Tuple2<Integer, Long> value) throws Exception {
                return value.f0;
            }
        }).asQueryableState(queryName, reducingState);
        final QueryableStateStream<Integer, Tuple2<Integer, Long>> duplicate = source.keyBy(new KeySelector<Tuple2<Integer, Long>, Integer>() {

            @Override
            public Integer getKey(Tuple2<Integer, Long> value) throws Exception {
                return value.f0;
            }
        }).asQueryableState(queryName);
        // Submit the job graph
        JobGraph jobGraph = env.getStreamGraph().getJobGraph();
        jobId = jobGraph.getJobID();
        // Register to be notified when the job reaches the FAILED state
        Future<JobStatusIs> failedFuture = cluster
            .getLeaderGateway(deadline.timeLeft())
            .ask(new NotifyWhenJobStatus(jobId, JobStatus.FAILED), deadline.timeLeft())
            .mapTo(ClassTag$.MODULE$.<JobStatusIs>apply(JobStatusIs.class));
        cluster.submitJobDetached(jobGraph);
        JobStatusIs jobStatus = Await.result(failedFuture, deadline.timeLeft());
        assertEquals(JobStatus.FAILED, jobStatus.state());
        // Get the job and check the cause
        JobFound jobFound = Await.result(
            cluster.getLeaderGateway(deadline.timeLeft())
                .ask(new JobManagerMessages.RequestJob(jobId), deadline.timeLeft())
                .mapTo(ClassTag$.MODULE$.<JobFound>apply(JobFound.class)),
            deadline.timeLeft());
        String failureCause = jobFound.executionGraph().getFailureCauseAsString();
        assertTrue("Not instance of SuppressRestartsException", failureCause.startsWith("org.apache.flink.runtime.execution.SuppressRestartsException"));
        int causedByIndex = failureCause.indexOf("Caused by: ");
        String subFailureCause = failureCause.substring(causedByIndex + "Caused by: ".length());
        assertTrue("Not caused by IllegalStateException", subFailureCause.startsWith("java.lang.IllegalStateException"));
        assertTrue("Exception does not contain registration name", subFailureCause.contains(queryName));
    } finally {
        // Free cluster resources
        if (jobId != null) {
            Future<CancellationSuccess> cancellation = cluster
                .getLeaderGateway(deadline.timeLeft())
                .ask(new JobManagerMessages.CancelJob(jobId), deadline.timeLeft())
                .mapTo(ClassTag$.MODULE$.<CancellationSuccess>apply(CancellationSuccess.class));
            Await.ready(cancellation, deadline.timeLeft());
        }
    }
}
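
The failure asserted above is triggered purely by registering two queryable state streams under the same name ("duplicate-me"). For contrast, below is a minimal sketch (not part of the test) of the non-failing variant, where each stream is registered under its own name. It assumes the same surrounding test context (TestKeyRangeSource, SumReduce, numKeys, env, source from the method above); the names "unique-name-1" / "unique-name-2" and the variables keySelector, first and second are illustrative only.

// Sketch only: distinct registration names avoid the IllegalStateException
ReducingStateDescriptor<Tuple2<Integer, Long>> reducingState =
    new ReducingStateDescriptor<>("any-name", new SumReduce(), source.getType());

// Same key extraction as in the test, factored into a reusable selector
KeySelector<Tuple2<Integer, Long>, Integer> keySelector =
    new KeySelector<Tuple2<Integer, Long>, Integer>() {
        @Override
        public Integer getKey(Tuple2<Integer, Long> value) {
            return value.f0;
        }
    };

// Each registration uses its own, unique name, so the JobManager accepts both
QueryableStateStream<Integer, Tuple2<Integer, Long>> first =
    source.keyBy(keySelector).asQueryableState("unique-name-1", reducingState);
QueryableStateStream<Integer, Tuple2<Integer, Long>> second =
    source.keyBy(keySelector).asQueryableState("unique-name-2", reducingState);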
Also used:
KeySelector (org.apache.flink.api.java.functions.KeySelector)
JobFound (org.apache.flink.runtime.messages.JobManagerMessages.JobFound)
ReducingStateDescriptor (org.apache.flink.api.common.state.ReducingStateDescriptor)
JobStatusIs (org.apache.flink.runtime.testingUtils.TestingJobManagerMessages.JobStatusIs)
Deadline (scala.concurrent.duration.Deadline)
JobManagerMessages (org.apache.flink.runtime.messages.JobManagerMessages)
JobGraph (org.apache.flink.runtime.jobgraph.JobGraph)
Tuple2 (org.apache.flink.api.java.tuple.Tuple2)
AtomicLong (java.util.concurrent.atomic.AtomicLong)
CancellationSuccess (org.apache.flink.runtime.messages.JobManagerMessages.CancellationSuccess)
StreamExecutionEnvironment (org.apache.flink.streaming.api.environment.StreamExecutionEnvironment)
JobID (org.apache.flink.api.common.JobID)
NotifyWhenJobStatus (org.apache.flink.runtime.testingUtils.TestingJobManagerMessages.NotifyWhenJobStatus)
Test (org.junit.Test)

Aggregations

AtomicLong (java.util.concurrent.atomic.AtomicLong) 1
JobID (org.apache.flink.api.common.JobID) 1
ReducingStateDescriptor (org.apache.flink.api.common.state.ReducingStateDescriptor) 1
KeySelector (org.apache.flink.api.java.functions.KeySelector) 1
Tuple2 (org.apache.flink.api.java.tuple.Tuple2) 1
JobGraph (org.apache.flink.runtime.jobgraph.JobGraph) 1
JobManagerMessages (org.apache.flink.runtime.messages.JobManagerMessages) 1
CancellationSuccess (org.apache.flink.runtime.messages.JobManagerMessages.CancellationSuccess) 1
JobFound (org.apache.flink.runtime.messages.JobManagerMessages.JobFound) 1
JobStatusIs (org.apache.flink.runtime.testingUtils.TestingJobManagerMessages.JobStatusIs) 1
NotifyWhenJobStatus (org.apache.flink.runtime.testingUtils.TestingJobManagerMessages.NotifyWhenJobStatus) 1
StreamExecutionEnvironment (org.apache.flink.streaming.api.environment.StreamExecutionEnvironment) 1
Test (org.junit.Test) 1
Deadline (scala.concurrent.duration.Deadline) 1