
Example 71 with ExecutionAttemptID

Use of org.apache.flink.runtime.executiongraph.ExecutionAttemptID in project flink by apache.

From class TaskExecutionStateTest, method testSerialization.

@Test
public void testSerialization() {
    try {
        final JobID jid = new JobID();
        final ExecutionAttemptID executionId = new ExecutionAttemptID();
        final ExecutionState state = ExecutionState.DEPLOYING;
        final Throwable error = new IOException("fubar");
        TaskExecutionState original1 = new TaskExecutionState(jid, executionId, state, error);
        TaskExecutionState original2 = new TaskExecutionState(jid, executionId, state);
        TaskExecutionState javaSerCopy1 = CommonTestUtils.createCopySerializable(original1);
        TaskExecutionState javaSerCopy2 = CommonTestUtils.createCopySerializable(original2);
        // equalities
        assertEquals(original1, javaSerCopy1);
        assertEquals(javaSerCopy1, original1);
        assertEquals(original2, javaSerCopy2);
        assertEquals(javaSerCopy2, original2);
        // hash codes
        assertEquals(original1.hashCode(), javaSerCopy1.hashCode());
        assertEquals(original2.hashCode(), javaSerCopy2.hashCode());
    } catch (Exception e) {
        e.printStackTrace();
        fail(e.getMessage());
    }
}
Also used : ExecutionState(org.apache.flink.runtime.execution.ExecutionState) ExecutionAttemptID(org.apache.flink.runtime.executiongraph.ExecutionAttemptID) IOException(java.io.IOException) JobID(org.apache.flink.api.common.JobID) Test(org.junit.Test)
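
CommonTestUtils.createCopySerializable produces a copy of the object via Java serialization, which is what makes the equality and hash-code checks above meaningful across a serialization boundary. A rough sketch of such a helper, assuming a plain serialize/deserialize round trip (the actual Flink utility may differ in detail):

// Sketch of a Java serialization round trip, i.e. roughly what a helper like
// CommonTestUtils.createCopySerializable is assumed to do; not the actual Flink code.
private static <T extends java.io.Serializable> T copyBySerialization(T original) throws Exception {
    java.io.ByteArrayOutputStream bytes = new java.io.ByteArrayOutputStream();
    try (java.io.ObjectOutputStream out = new java.io.ObjectOutputStream(bytes)) {
        out.writeObject(original);
    }
    try (java.io.ObjectInputStream in =
            new java.io.ObjectInputStream(new java.io.ByteArrayInputStream(bytes.toByteArray()))) {
        @SuppressWarnings("unchecked")
        T copy = (T) in.readObject();
        return copy;
    }
}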

Example 72 with ExecutionAttemptID

Use of org.apache.flink.runtime.executiongraph.ExecutionAttemptID in project flink by apache.

From class TaskInputSplitProviderTest, method testRequestNextInputSplitWithInvalidExecutionID.

@Test
public void testRequestNextInputSplitWithInvalidExecutionID() throws InputSplitProviderException {
    final JobID jobID = new JobID();
    final JobVertexID vertexID = new JobVertexID();
    final ExecutionAttemptID executionID = new ExecutionAttemptID();
    final FiniteDuration timeout = new FiniteDuration(10, TimeUnit.SECONDS);
    final ActorGateway gateway = new NullInputSplitGateway();
    final TaskInputSplitProvider provider = new TaskInputSplitProvider(gateway, jobID, vertexID, executionID, timeout);
    // The mock job manager gateway answers with a null input split for the unknown
    // execution attempt ID, so the provider should hand back no split.
    InputSplit nextInputSplit = provider.getNextInputSplit(getClass().getClassLoader());
    assertNull(nextInputSplit);
}
Also used : ExecutionAttemptID(org.apache.flink.runtime.executiongraph.ExecutionAttemptID) JobVertexID(org.apache.flink.runtime.jobgraph.JobVertexID) BaseTestingActorGateway(org.apache.flink.runtime.instance.BaseTestingActorGateway) ActorGateway(org.apache.flink.runtime.instance.ActorGateway) FiniteDuration(scala.concurrent.duration.FiniteDuration) InputSplit(org.apache.flink.core.io.InputSplit) JobID(org.apache.flink.api.common.JobID) Test(org.junit.Test)
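
The provider in this test returns null because the mock gateway does not recognize the execution attempt ID. A caller typically treats a null split as "no further input"; a hypothetical consumption loop, assuming a Consumer-style callback (processSplit is a placeholder introduced here, not a Flink API), could look like this:

// Hypothetical usage sketch: drain input splits until the provider returns null.
// The 'processSplit' callback is a placeholder for illustration only.
private static void consumeAllSplits(
        InputSplitProvider provider,
        ClassLoader userCodeClassLoader,
        java.util.function.Consumer<InputSplit> processSplit) throws InputSplitProviderException {
    InputSplit split;
    while ((split = provider.getNextInputSplit(userCodeClassLoader)) != null) {
        processSplit.accept(split);
    }
    // a null split, as asserted in the test above, simply means there is no more input
}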

Example 73 with ExecutionAttemptID

Use of org.apache.flink.runtime.executiongraph.ExecutionAttemptID in project flink by apache.

From class TaskManagerTest, method testLocalPartitionNotFound.

/**
 * Tests that repeated local {@link PartitionNotFoundException}s ultimately fail the receiver.
 */
@Test
public void testLocalPartitionNotFound() throws Exception {
    new JavaTestKit(system) {

        {
            ActorGateway jobManager = null;
            ActorGateway taskManager = null;
            final ActorGateway testActorGateway = new AkkaActorGateway(getTestActor(), leaderSessionID);
            try {
                final IntermediateDataSetID resultId = new IntermediateDataSetID();
                // Create the JM
                ActorRef jm = system.actorOf(Props.create(new SimplePartitionStateLookupJobManagerCreator(leaderSessionID, getTestActor())));
                jobManager = new AkkaActorGateway(jm, leaderSessionID);
                final Configuration config = new Configuration();
                config.setInteger(TaskManagerOptions.NETWORK_REQUEST_BACKOFF_INITIAL, 100);
                config.setInteger(TaskManagerOptions.NETWORK_REQUEST_BACKOFF_MAX, 200);
                taskManager = TestingUtils.createTaskManager(system, jobManager, config, true, true);
                // ---------------------------------------------------------------------------------
                final ActorGateway tm = taskManager;
                final JobID jid = new JobID();
                final JobVertexID vid = new JobVertexID();
                final ExecutionAttemptID eid = new ExecutionAttemptID();
                final ResultPartitionID partitionId = new ResultPartitionID();
                // Local location (on the same TM though) for the partition
                final ResultPartitionLocation loc = ResultPartitionLocation.createLocal();
                final InputChannelDeploymentDescriptor[] icdd = new InputChannelDeploymentDescriptor[] { new InputChannelDeploymentDescriptor(partitionId, loc) };
                final InputGateDeploymentDescriptor igdd = new InputGateDeploymentDescriptor(resultId, ResultPartitionType.PIPELINED, 0, icdd);
                final TaskDeploymentDescriptor tdd = createTaskDeploymentDescriptor(
                    jid, "TestJob", vid, eid,
                    new SerializedValue<>(new ExecutionConfig()),
                    "Receiver", 1, 0, 1, 0,
                    new Configuration(), new Configuration(),
                    Tasks.AgnosticReceiver.class.getName(),
                    Collections.<ResultPartitionDeploymentDescriptor>emptyList(),
                    Collections.singletonList(igdd),
                    Collections.<BlobKey>emptyList(),
                    Collections.<URL>emptyList(),
                    0);
                new Within(new FiniteDuration(120, TimeUnit.SECONDS)) {

                    @Override
                    protected void run() {
                        // Submit the task
                        tm.tell(new SubmitTask(tdd), testActorGateway);
                        expectMsgClass(Acknowledge.get().getClass());
                        // Wait to be notified about the final execution state by the mock JM
                        TaskExecutionState msg = expectMsgClass(TaskExecutionState.class);
                        // The task should fail after repeated requests
                        assertEquals(ExecutionState.FAILED, msg.getExecutionState());
                        Throwable error = msg.getError(getClass().getClassLoader());
                        if (error.getClass() != PartitionNotFoundException.class) {
                            error.printStackTrace();
                            fail("Wrong exception: " + error.getMessage());
                        }
                    }
                };
            } catch (Exception e) {
                e.printStackTrace();
                fail(e.getMessage());
            } finally {
                TestingUtils.stopActor(taskManager);
                TestingUtils.stopActor(jobManager);
            }
        }
    };
}
Also used : AkkaActorGateway(org.apache.flink.runtime.instance.AkkaActorGateway) TaskManagerServicesConfiguration(org.apache.flink.runtime.taskexecutor.TaskManagerServicesConfiguration) Configuration(org.apache.flink.configuration.Configuration) ActorRef(akka.actor.ActorRef) JobVertexID(org.apache.flink.runtime.jobgraph.JobVertexID) ExecutionConfig(org.apache.flink.api.common.ExecutionConfig) ResultPartitionLocation(org.apache.flink.runtime.deployment.ResultPartitionLocation) ActorGateway(org.apache.flink.runtime.instance.ActorGateway) ResultPartitionID(org.apache.flink.runtime.io.network.partition.ResultPartitionID) IntermediateResultPartitionID(org.apache.flink.runtime.jobgraph.IntermediateResultPartitionID) TaskDeploymentDescriptor(org.apache.flink.runtime.deployment.TaskDeploymentDescriptor) SubmitTask(org.apache.flink.runtime.messages.TaskMessages.SubmitTask) ExecutionAttemptID(org.apache.flink.runtime.executiongraph.ExecutionAttemptID) FiniteDuration(scala.concurrent.duration.FiniteDuration) InputGateDeploymentDescriptor(org.apache.flink.runtime.deployment.InputGateDeploymentDescriptor) PartitionNotFoundException(org.apache.flink.runtime.io.network.partition.PartitionNotFoundException) IOException(java.io.IOException) InputChannelDeploymentDescriptor(org.apache.flink.runtime.deployment.InputChannelDeploymentDescriptor) IntermediateDataSetID(org.apache.flink.runtime.jobgraph.IntermediateDataSetID) JavaTestKit(akka.testkit.JavaTestKit) JobID(org.apache.flink.api.common.JobID) Test(org.junit.Test)
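
The two NETWORK_REQUEST_BACKOFF options bound how long the receiver keeps retrying the partition request before the task is failed. Assuming a doubling backoff capped at the configured maximum (an illustration of the general pattern, not necessarily Flink's exact retry policy), the values of 100 ms and 200 ms used above allow only a couple of attempts:

// Illustration only: a doubling backoff bounded by a maximum, as suggested by
// NETWORK_REQUEST_BACKOFF_INITIAL / NETWORK_REQUEST_BACKOFF_MAX. Flink's actual
// retry logic in the input channels may differ in detail.
public class BackoffSketch {
    public static void main(String[] args) {
        int backoff = 100;          // initial backoff in ms, as configured in the test
        final int maxBackoff = 200; // maximum backoff in ms, as configured in the test
        int attempt = 1;
        while (backoff <= maxBackoff) {
            System.out.println("attempt " + attempt++ + ": retry after " + backoff + " ms");
            backoff *= 2; // 100 -> 200 -> 400, which exceeds the maximum and ends the retries
        }
        // once retries are exhausted, the missing partition surfaces as a
        // PartitionNotFoundException and the task transitions to FAILED, as the test expects
    }
}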

Example 74 with ExecutionAttemptID

Use of org.apache.flink.runtime.executiongraph.ExecutionAttemptID in project flink by apache.

From class TaskTest, method createTask.

private Task createTask(Class<? extends AbstractInvokable> invokable, LibraryCacheManager libCache, NetworkEnvironment networkEnvironment, ResultPartitionConsumableNotifier consumableNotifier, PartitionProducerStateChecker partitionProducerStateChecker, Executor executor, Configuration taskManagerConfig, ExecutionConfig execConfig) throws IOException {
    JobID jobId = new JobID();
    JobVertexID jobVertexId = new JobVertexID();
    ExecutionAttemptID executionAttemptId = new ExecutionAttemptID();
    InputSplitProvider inputSplitProvider = new TaskInputSplitProvider(jobManagerGateway, jobId, jobVertexId, executionAttemptId, new FiniteDuration(60, TimeUnit.SECONDS));
    CheckpointResponder checkpointResponder = new ActorGatewayCheckpointResponder(jobManagerGateway);
    SerializedValue<ExecutionConfig> serializedExecutionConfig = new SerializedValue<>(execConfig);
    JobInformation jobInformation = new JobInformation(jobId, "Test Job", serializedExecutionConfig, new Configuration(), Collections.<BlobKey>emptyList(), Collections.<URL>emptyList());
    TaskInformation taskInformation = new TaskInformation(jobVertexId, "Test Task", 1, 1, invokable.getName(), new Configuration());
    return new Task(
        jobInformation,
        taskInformation,
        executionAttemptId,
        new AllocationID(),
        0, 0,
        Collections.<ResultPartitionDeploymentDescriptor>emptyList(),
        Collections.<InputGateDeploymentDescriptor>emptyList(),
        0,
        null,
        mock(MemoryManager.class),
        mock(IOManager.class),
        networkEnvironment,
        mock(BroadcastVariableManager.class),
        taskManagerConnection,
        inputSplitProvider,
        checkpointResponder,
        libCache,
        mock(FileCache.class),
        new TestingTaskManagerRuntimeInfo(taskManagerConfig),
        mock(TaskMetricGroup.class),
        consumableNotifier,
        partitionProducerStateChecker,
        executor);
}
Also used : JobInformation(org.apache.flink.runtime.executiongraph.JobInformation) ExecutionAttemptID(org.apache.flink.runtime.executiongraph.ExecutionAttemptID) TaskInformation(org.apache.flink.runtime.executiongraph.TaskInformation) Configuration(org.apache.flink.configuration.Configuration) IOManager(org.apache.flink.runtime.io.disk.iomanager.IOManager) TaskMetricGroup(org.apache.flink.runtime.metrics.groups.TaskMetricGroup) JobVertexID(org.apache.flink.runtime.jobgraph.JobVertexID) AllocationID(org.apache.flink.runtime.clusterframework.types.AllocationID) FiniteDuration(scala.concurrent.duration.FiniteDuration) ExecutionConfig(org.apache.flink.api.common.ExecutionConfig) SerializedValue(org.apache.flink.util.SerializedValue) MemoryManager(org.apache.flink.runtime.memory.MemoryManager) FileCache(org.apache.flink.runtime.filecache.FileCache) TestingTaskManagerRuntimeInfo(org.apache.flink.runtime.util.TestingTaskManagerRuntimeInfo) BroadcastVariableManager(org.apache.flink.runtime.broadcast.BroadcastVariableManager) InputSplitProvider(org.apache.flink.runtime.jobgraph.tasks.InputSplitProvider) JobID(org.apache.flink.api.common.JobID)
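
A factory like this is typically used to build a Task, run it, and assert on its final state. A rough usage sketch, assuming the surrounding test fixtures (libCache, network, consumableNotifier, partitionProducerStateChecker, executor) and a hypothetical invokable class:

// Rough usage sketch for the createTask factory above. TestInvokableCorrect is a
// hypothetical AbstractInvokable; the other arguments are assumed to be fields of the test class.
@Test
public void testTaskRunsToFinished() throws Exception {
    Task task = createTask(TestInvokableCorrect.class, libCache, network,
            consumableNotifier, partitionProducerStateChecker, executor,
            new Configuration(), new ExecutionConfig());

    task.startTaskThread();           // runs the invokable on a dedicated thread
    task.getExecutingThread().join(); // wait for the task to finish

    assertEquals(ExecutionState.FINISHED, task.getExecutionState());
}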

Example 75 with ExecutionAttemptID

Use of org.apache.flink.runtime.executiongraph.ExecutionAttemptID in project flink by apache.

From class AccumulatorLiveITCase, method verifyResults.

private static void verifyResults() {
    new JavaTestKit(system) {

        {
            ActorGateway selfGateway = new AkkaActorGateway(getRef(), jobManagerGateway.leaderSessionID());
            // register for accumulator changes
            jobManagerGateway.tell(new TestingJobManagerMessages.NotifyWhenAccumulatorChange(jobID), selfGateway);
            expectMsgEquals(TIMEOUT, true);
            // submit job
            jobManagerGateway.tell(new JobManagerMessages.SubmitJob(jobGraph, ListeningBehaviour.EXECUTION_RESULT), selfGateway);
            expectMsgClass(TIMEOUT, JobManagerMessages.JobSubmitSuccess.class);
            TestingJobManagerMessages.UpdatedAccumulators msg = (TestingJobManagerMessages.UpdatedAccumulators) receiveOne(TIMEOUT);
            Map<String, Accumulator<?, ?>> userAccumulators = msg.userAccumulators();
            ExecutionAttemptID mapperTaskID = null;
            ExecutionAttemptID sinkTaskID = null;
            /* Check for accumulator values */
            if (checkUserAccumulators(0, userAccumulators)) {
                LOG.info("Passed initial check for map task.");
            } else {
                fail("Wrong accumulator results when map task begins execution.");
            }
            int expectedAccVal = 0;
            /* for mapper task */
            for (int i = 1; i <= NUM_ITERATIONS; i++) {
                expectedAccVal += i;
                // receive message
                msg = (TestingJobManagerMessages.UpdatedAccumulators) receiveOne(TIMEOUT);
                userAccumulators = msg.userAccumulators();
                LOG.info("{}", userAccumulators);
                if (checkUserAccumulators(expectedAccVal, userAccumulators)) {
                    LOG.info("Passed round #" + i);
                } else if (checkUserAccumulators(expectedAccVal, userAccumulators)) {
                    // this branch was meant to swap the task ids if they had been determined
                    // in the wrong order, but since its condition is identical to the one
                    // above it is never reached
                    ExecutionAttemptID temp = mapperTaskID;
                    mapperTaskID = sinkTaskID;
                    sinkTaskID = temp;
                    LOG.info("Passed round #" + i);
                } else {
                    fail("Failed in round #" + i);
                }
            }
            msg = (TestingJobManagerMessages.UpdatedAccumulators) receiveOne(TIMEOUT);
            userAccumulators = msg.userAccumulators();
            if (checkUserAccumulators(expectedAccVal, userAccumulators)) {
                LOG.info("Passed initial check for sink task.");
            } else {
                fail("Wrong accumulator results when sink task begins execution.");
            }
            /* for sink task */
            for (int i = 1; i <= NUM_ITERATIONS; i++) {
                // receive message
                msg = (TestingJobManagerMessages.UpdatedAccumulators) receiveOne(TIMEOUT);
                userAccumulators = msg.userAccumulators();
                LOG.info("{}", userAccumulators);
                if (checkUserAccumulators(expectedAccVal, userAccumulators)) {
                    LOG.info("Passed round #" + i);
                } else {
                    fail("Failed in round #" + i);
                }
            }
            expectMsgClass(TIMEOUT, JobManagerMessages.JobResultSuccess.class);
        }
    };
}
Also used : AkkaActorGateway(org.apache.flink.runtime.instance.AkkaActorGateway) Accumulator(org.apache.flink.api.common.accumulators.Accumulator) ExecutionAttemptID(org.apache.flink.runtime.executiongraph.ExecutionAttemptID) JobManagerMessages(org.apache.flink.runtime.messages.JobManagerMessages) TestingJobManagerMessages(org.apache.flink.runtime.testingUtils.TestingJobManagerMessages) ActorGateway(org.apache.flink.runtime.instance.ActorGateway) JavaTestKit(akka.testkit.JavaTestKit)
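
The checkUserAccumulators helper referenced above belongs to the test class and is not shown here. A plausible sketch, in which the accumulator name ("test") and the exact comparison are assumptions rather than the real implementation, simply compares the current value of a named accumulator against the expected count:

// Hypothetical sketch of the checkUserAccumulators helper; the accumulator name
// and the exact comparison are assumptions, not the actual test code.
private static boolean checkUserAccumulators(int expected, Map<String, Accumulator<?, ?>> accumulators) {
    Accumulator<?, ?> acc = accumulators.get("test");
    return acc != null && Integer.valueOf(expected).equals(acc.getLocalValue());
}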

Aggregations

ExecutionAttemptID (org.apache.flink.runtime.executiongraph.ExecutionAttemptID) 81
Test (org.junit.Test) 66
JobID (org.apache.flink.api.common.JobID) 61
ExecutionVertex (org.apache.flink.runtime.executiongraph.ExecutionVertex) 41
IOException (java.io.IOException) 31
JobVertexID (org.apache.flink.runtime.jobgraph.JobVertexID) 30
Configuration (org.apache.flink.configuration.Configuration) 24
ExecutionConfig (org.apache.flink.api.common.ExecutionConfig) 21
AcknowledgeCheckpoint (org.apache.flink.runtime.messages.checkpoint.AcknowledgeCheckpoint) 19
ActorGateway (org.apache.flink.runtime.instance.ActorGateway) 17
AkkaActorGateway (org.apache.flink.runtime.instance.AkkaActorGateway) 16
TaskDeploymentDescriptor (org.apache.flink.runtime.deployment.TaskDeploymentDescriptor) 15
TaskManagerServicesConfiguration (org.apache.flink.runtime.taskexecutor.TaskManagerServicesConfiguration) 14
ActorRef (akka.actor.ActorRef) 13
SubmitTask (org.apache.flink.runtime.messages.TaskMessages.SubmitTask) 13
JavaTestKit (akka.testkit.JavaTestKit) 12
BlobKey (org.apache.flink.runtime.blob.BlobKey) 10
TriggerStackTraceSample (org.apache.flink.runtime.messages.StackTraceSampleMessages.TriggerStackTraceSample) 10
PartitionNotFoundException (org.apache.flink.runtime.io.network.partition.PartitionNotFoundException) 9
ResultPartitionID (org.apache.flink.runtime.io.network.partition.ResultPartitionID) 9