
Example 76 with FiniteDuration

Use of scala.concurrent.duration.FiniteDuration in project flink by apache.

From the class ChaosMonkeyITCase, the method testChaosMonkey:

@Test
public void testChaosMonkey() throws Exception {
    // Test config
    final int numberOfJobManagers = 3;
    final int numberOfTaskManagers = 3;
    final int numberOfSlotsPerTaskManager = 2;
    // The final count each source is counting to: 1...n
    final int n = 5000;
    // Parallelism for the program
    final int parallelism = numberOfTaskManagers * numberOfSlotsPerTaskManager;
    // The test should not run longer than this
    final FiniteDuration testDuration = new FiniteDuration(10, TimeUnit.MINUTES);
    // Every x seconds a random job or task manager is killed
    //
    // The job will be running for $killEvery seconds and then a random Job/TaskManager
    // will be killed. On recovery (which takes some time to bring up the new process, etc.),
    // this test will wait for the task managers to reconnect before starting the next countdown.
    // Therefore, the delay between retries is not important in this setup.
    final FiniteDuration killEvery = new FiniteDuration(5, TimeUnit.SECONDS);
    // Trigger a checkpoint every checkpointingIntervalMs milliseconds
    final int checkpointingIntervalMs = 1000;
    // Total number of kills
    final int totalNumberOfKills = 10;
    // -----------------------------------------------------------------------------------------
    // Setup
    Configuration config = ZooKeeperTestUtils.createZooKeeperHAConfig(ZooKeeper.getConnectString(), FileStateBackendBasePath.toURI().toString());
    // Akka and restart timeouts
    config.setString(ConfigConstants.AKKA_WATCH_HEARTBEAT_INTERVAL, "1000 ms");
    config.setString(ConfigConstants.AKKA_WATCH_HEARTBEAT_PAUSE, "6 s");
    config.setInteger(ConfigConstants.AKKA_WATCH_THRESHOLD, 9);
    if (checkpointingIntervalMs >= killEvery.toMillis()) {
        throw new IllegalArgumentException(
                "Relax! You want to kill processes every " + killEvery + ", but the " +
                "checkpointing interval is " + checkpointingIntervalMs + " ms. Either " +
                "decrease the checkpointing interval or increase the kill interval. " +
                "Otherwise, the program will not complete any checkpoint.");
    }
    // Task manager
    config.setInteger(ConfigConstants.TASK_MANAGER_NUM_TASK_SLOTS, numberOfSlotsPerTaskManager);
    ActorSystem testActorSystem = null;
    LeaderRetrievalService leaderRetrievalService = null;
    List<JobManagerProcess> jobManagerProcesses = new ArrayList<>();
    List<TaskManagerProcess> taskManagerProcesses = new ArrayList<>();
    try {
        // Initial state
        for (int i = 0; i < numberOfJobManagers; i++) {
            jobManagerProcesses.add(createAndStartJobManagerProcess(config));
        }
        for (int i = 0; i < numberOfTaskManagers; i++) {
            taskManagerProcesses.add(createAndStartTaskManagerProcess(config));
        }
        testActorSystem = AkkaUtils.createDefaultActorSystem();
        // Leader listener
        leaderRetrievalService = ZooKeeperUtils.createLeaderRetrievalService(config);
        TestingListener leaderListener = new TestingListener();
        leaderRetrievalService.start(leaderListener);
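        // Convert the overall test budget into one absolute deadline; every wait below
        // draws on deadline.timeLeft(), so all phases share a single time budget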
        Deadline deadline = testDuration.fromNow();
        // Wait for the new leader
        int leaderIndex = waitForNewLeader(leaderListener, jobManagerProcesses, deadline.timeLeft());
        // Wait for the task managers to connect
        waitForTaskManagers(numberOfTaskManagers, jobManagerProcesses.get(leaderIndex), testActorSystem, deadline.timeLeft());
        // The job
        JobGraph jobGraph = createJobGraph(n, CheckpointCompletedCoordination.getPath(), ProceedCoordination.getPath(), parallelism, checkpointingIntervalMs);
        LOG.info("Submitting job {}", jobGraph.getJobID());
        submitJobGraph(jobGraph, jobManagerProcesses.get(leaderIndex), leaderListener, testActorSystem, deadline.timeLeft());
        LOG.info("Waiting for a checkpoint to complete before kicking off chaos");
        // Wait for a checkpoint to complete
        TestJvmProcess.waitForMarkerFiles(FileStateBackendBasePath, COMPLETED_PREFIX, parallelism, deadline.timeLeft().toMillis());
        LOG.info("Checkpoint completed... ready for chaos");
        int currentKillNumber = 1;
        int currentJobManagerKills = 0;
        int currentTaskManagerKills = 0;
        for (int i = 0; i < totalNumberOfKills; i++) {
            LOG.info("Waiting for {} before next kill ({}/{})", killEvery, currentKillNumber++, totalNumberOfKills);
            Thread.sleep(killEvery.toMillis());
            LOG.info("Checking job status...");
            JobStatus jobStatus = requestJobStatus(jobGraph.getJobID(), jobManagerProcesses.get(leaderIndex), testActorSystem, deadline.timeLeft());
            if (jobStatus != JobStatus.RUNNING && jobStatus != JobStatus.FINISHED) {
                // Wait for it to run
                LOG.info("Waiting for job status {}", JobStatus.RUNNING);
                waitForJobRunning(jobGraph.getJobID(), jobManagerProcesses.get(leaderIndex), testActorSystem, deadline.timeLeft());
            } else if (jobStatus == JobStatus.FINISHED) {
                // Early finish
                LOG.info("Job finished");
                return;
            } else {
                LOG.info("Job status is {}", jobStatus);
            }
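            // Flip a coin: kill either the leading JobManager or a random TaskManager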
            if (rand.nextBoolean()) {
                LOG.info("Killing the leading JobManager");
                JobManagerProcess newJobManager = createAndStartJobManagerProcess(config);
                JobManagerProcess leader = jobManagerProcesses.remove(leaderIndex);
                leader.destroy();
                currentJobManagerKills++;
                LOG.info("Killed {}", leader);
                // Make sure to add the new job manager before looking for a new leader
                jobManagerProcesses.add(newJobManager);
                // Wait for the new leader
                leaderIndex = waitForNewLeader(leaderListener, jobManagerProcesses, deadline.timeLeft());
                // Wait for the task managers to connect
                waitForTaskManagers(numberOfTaskManagers, jobManagerProcesses.get(leaderIndex), testActorSystem, deadline.timeLeft());
            } else {
                LOG.info("Killing a random TaskManager");
                TaskManagerProcess newTaskManager = createAndStartTaskManagerProcess(config);
                // Wait for this new task manager to be connected
                waitForTaskManagers(numberOfTaskManagers + 1, jobManagerProcesses.get(leaderIndex), testActorSystem, deadline.timeLeft());
                // Now it's safe to kill a process
                int next = rand.nextInt(numberOfTaskManagers);
                TaskManagerProcess taskManager = taskManagerProcesses.remove(next);
                LOG.info("{} has been chosen. Killing process...", taskManager);
                taskManager.destroy();
                currentTaskManagerKills++;
                // Add the new task manager after killing an old one
                taskManagerProcesses.add(newTaskManager);
            }
        }
        LOG.info("Chaos is over. Total kills: {} ({} job manager + {} task managers). " + "Checking job status...", totalNumberOfKills, currentJobManagerKills, currentTaskManagerKills);
        // Signal the job to speed up (if it is not done yet)
        TestJvmProcess.touchFile(ProceedCoordination);
        // Wait for the job to finish
        LOG.info("Waiting for job status {}", JobStatus.FINISHED);
        waitForJobFinished(jobGraph.getJobID(), jobManagerProcesses.get(leaderIndex), testActorSystem, deadline.timeLeft());
        LOG.info("Job finished");
        LOG.info("Waiting for job removal");
        waitForJobRemoved(jobGraph.getJobID(), jobManagerProcesses.get(leaderIndex), testActorSystem, deadline.timeLeft());
        LOG.info("Job removed");
        LOG.info("Checking clean recovery state...");
        checkCleanRecoveryState(config);
        LOG.info("Recovery state clean");
    } catch (Throwable t) {
        // Print early (in some situations the process logs get too big
        // for Travis and the root problem is not shown)
        t.printStackTrace();
        System.out.println("#################################################");
        System.out.println(" TASK MANAGERS");
        System.out.println("#################################################");
        for (TaskManagerProcess taskManagerProcess : taskManagerProcesses) {
            taskManagerProcess.printProcessLog();
        }
        System.out.println("#################################################");
        System.out.println(" JOB MANAGERS");
        System.out.println("#################################################");
        for (JobManagerProcess jobManagerProcess : jobManagerProcesses) {
            jobManagerProcess.printProcessLog();
        }
        throw t;
    } finally {
        for (JobManagerProcess jobManagerProcess : jobManagerProcesses) {
            if (jobManagerProcess != null) {
                jobManagerProcess.destroy();
            }
        }
        // Also destroy the task manager processes; otherwise the spawned test JVMs leak
        for (TaskManagerProcess taskManagerProcess : taskManagerProcesses) {
            if (taskManagerProcess != null) {
                taskManagerProcess.destroy();
            }
        }
        if (leaderRetrievalService != null) {
            leaderRetrievalService.stop();
        }
        if (testActorSystem != null) {
            testActorSystem.shutdown();
        }
    }
}
Also used: ActorSystem (akka.actor.ActorSystem), TaskManagerProcess (org.apache.flink.runtime.testutils.TaskManagerProcess), Configuration (org.apache.flink.configuration.Configuration), Deadline (scala.concurrent.duration.Deadline), ArrayList (java.util.ArrayList), FiniteDuration (scala.concurrent.duration.FiniteDuration), JobStatus (org.apache.flink.runtime.jobgraph.JobStatus), TestingListener (org.apache.flink.runtime.leaderelection.TestingListener), JobGraph (org.apache.flink.runtime.jobgraph.JobGraph), LeaderRetrievalService (org.apache.flink.runtime.leaderretrieval.LeaderRetrievalService), JobManagerProcess (org.apache.flink.runtime.testutils.JobManagerProcess), Test (org.junit.Test)
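
The pattern worth noting in this example is the FiniteDuration/Deadline pair from the Scala standard library: the overall test budget (testDuration) is converted into an absolute Deadline once via fromNow(), and every individual wait then draws on deadline.timeLeft(), so all phases share a single time budget instead of each getting a fresh timeout. A minimal, self-contained sketch of that pattern (the class name and loop body are illustrative, not taken from Flink):

import java.util.concurrent.TimeUnit;

import scala.concurrent.duration.Deadline;
import scala.concurrent.duration.FiniteDuration;

public class DeadlineSketch {

    public static void main(String[] args) throws InterruptedException {
        // Overall budget, analogous to testDuration in the test above
        FiniteDuration budget = new FiniteDuration(2, TimeUnit.SECONDS);
        // Fix the absolute point in time at which the budget expires
        Deadline deadline = budget.fromNow();
        while (deadline.hasTimeLeft()) {
            // Each wait is bounded by whatever remains of the shared budget,
            // mirroring the deadline.timeLeft() calls in the test
            long remainingMs = deadline.timeLeft().toMillis();
            Thread.sleep(Math.min(500, Math.max(0, remainingMs)));
        }
        System.out.println("Budget exhausted, overdue: " + deadline.isOverdue());
    }
}

Because every helper receives deadline.timeLeft() rather than its own timeout, a slow recovery early in the test automatically shrinks the time allowed for the later phases.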

Example 77 with FiniteDuration

Use of scala.concurrent.duration.FiniteDuration in project flink by apache.

From the class TaskManagerFailureRecoveryITCase, the method testRestartWithFailingTaskManager:

@Test
public void testRestartWithFailingTaskManager() {
    final int PARALLELISM = 4;
    LocalFlinkMiniCluster cluster = null;
    ActorSystem additionalSystem = null;
    try {
        Configuration config = new Configuration();
        config.setInteger(ConfigConstants.LOCAL_NUMBER_TASK_MANAGER, 2);
        config.setInteger(ConfigConstants.TASK_MANAGER_NUM_TASK_SLOTS, PARALLELISM);
        config.setInteger(ConfigConstants.TASK_MANAGER_MEMORY_SIZE_KEY, 16);
        config.setString(ConfigConstants.AKKA_WATCH_HEARTBEAT_INTERVAL, "500 ms");
        config.setString(ConfigConstants.AKKA_WATCH_HEARTBEAT_PAUSE, "20 s");
        config.setInteger(ConfigConstants.AKKA_WATCH_THRESHOLD, 20);
        cluster = new LocalFlinkMiniCluster(config, false);
        cluster.start();
        // for the result
        List<Long> resultCollection = new ArrayList<Long>();
        final ExecutionEnvironment env = ExecutionEnvironment.createRemoteEnvironment("localhost", cluster.getLeaderRPCPort());
        env.setParallelism(PARALLELISM);
        env.setRestartStrategy(RestartStrategies.fixedDelayRestart(1, 1000));
        env.getConfig().disableSysoutLogging();
        env.generateSequence(1, 10).map(new FailingMapper<Long>()).reduce(new ReduceFunction<Long>() {

            @Override
            public Long reduce(Long value1, Long value2) {
                return value1 + value2;
            }
        }).output(new LocalCollectionOutputFormat<Long>(resultCollection));
        // simple reference (atomic does not matter) to pass back an exception from the trigger thread
        final AtomicReference<Throwable> ref = new AtomicReference<Throwable>();
        // trigger the execution from a separate thread, so that we are free to tamper with the
        // cluster during the execution
        Thread trigger = new Thread("program trigger") {

            @Override
            public void run() {
                try {
                    env.execute();
                } catch (Throwable t) {
                    ref.set(t);
                }
            }
        };
        trigger.setDaemon(true);
        trigger.start();
        // wait until each mapper has signalled (via the queue) that it is up and waiting
        for (int i = 0; i < PARALLELISM; i++) {
            FailingMapper.TASK_TO_COORD_QUEUE.take();
        }
        // bring up one more task manager and wait for it to appear
        {
            additionalSystem = cluster.startTaskManagerActorSystem(2);
            ActorRef additionalTaskManager = cluster.startTaskManager(2, additionalSystem);
            Object message = TaskManagerMessages.getNotifyWhenRegisteredAtJobManagerMessage();
            Future<Object> future = Patterns.ask(additionalTaskManager, message, 30000);
            try {
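                // Block for at most 30 seconds on the registration acknowledgement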
                Await.result(future, new FiniteDuration(30000, TimeUnit.MILLISECONDS));
            } catch (TimeoutException e) {
                fail("The additional TaskManager did not come up within 30 seconds");
            }
        }
        // kill the two original TaskManagers
        for (ActorRef tm : cluster.getTaskManagersAsJava()) {
            tm.tell(PoisonPill.getInstance(), null);
        }
        // wait for the next set of mappers (the recovery ones) to come online
        for (int i = 0; i < PARALLELISM; i++) {
            FailingMapper.TASK_TO_COORD_QUEUE.take();
        }
        // tell the mappers that they may continue this time
        for (int i = 0; i < PARALLELISM; i++) {
            FailingMapper.COORD_TO_TASK_QUEUE.add(new Object());
        }
        // wait for the program to finish
        trigger.join();
        if (ref.get() != null) {
            Throwable t = ref.get();
            t.printStackTrace();
            fail("Program execution caused an exception: " + t.getMessage());
        }
    } catch (Exception e) {
        e.printStackTrace();
        fail(e.getMessage());
    } finally {
        if (additionalSystem != null) {
            additionalSystem.shutdown();
        }
        if (cluster != null) {
            cluster.stop();
        }
    }
}
Also used: ActorSystem (akka.actor.ActorSystem), ExecutionEnvironment (org.apache.flink.api.java.ExecutionEnvironment), Configuration (org.apache.flink.configuration.Configuration), ActorRef (akka.actor.ActorRef), ArrayList (java.util.ArrayList), ReduceFunction (org.apache.flink.api.common.functions.ReduceFunction), FiniteDuration (scala.concurrent.duration.FiniteDuration), AtomicReference (java.util.concurrent.atomic.AtomicReference), TimeoutException (java.util.concurrent.TimeoutException), LocalFlinkMiniCluster (org.apache.flink.runtime.minicluster.LocalFlinkMiniCluster), Future (scala.concurrent.Future), Test (org.junit.Test)
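
The 30-second registration wait in this example is the standard Akka ask pattern: Patterns.ask returns a Scala Future, and Await.result blocks on it for at most the given FiniteDuration, throwing a TimeoutException if no reply arrives in time. A stripped-down sketch of that handshake (EchoActor and the "ping"/"ack" messages are stand-ins, not Flink or Akka classes):

import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;

import akka.actor.ActorRef;
import akka.actor.ActorSystem;
import akka.actor.Props;
import akka.actor.UntypedActor;
import akka.pattern.Patterns;
import scala.concurrent.Await;
import scala.concurrent.Future;
import scala.concurrent.duration.FiniteDuration;

public class AskSketch {

    // Hypothetical actor that acknowledges every message it receives
    public static class EchoActor extends UntypedActor {
        @Override
        public void onReceive(Object message) {
            getSender().tell("ack", getSelf());
        }
    }

    public static void main(String[] args) throws Exception {
        ActorSystem system = ActorSystem.create("sketch");
        try {
            ActorRef echo = system.actorOf(Props.create(EchoActor.class));
            // Ask with a 30 second timeout; the Future completes with the actor's reply
            Future<Object> future = Patterns.ask(echo, "ping", 30000);
            try {
                Object reply = Await.result(future, new FiniteDuration(30, TimeUnit.SECONDS));
                System.out.println("Got reply: " + reply);
            } catch (TimeoutException e) {
                System.err.println("No reply within 30 seconds");
            }
        } finally {
            system.shutdown();
        }
    }
}

Note that the millisecond timeout passed to Patterns.ask and the FiniteDuration passed to Await.result are independent; keeping them equal, as the test does, prevents one side from giving up before the other.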

Aggregations

FiniteDuration (scala.concurrent.duration.FiniteDuration): 77
Test (org.junit.Test): 61
Configuration (org.apache.flink.configuration.Configuration): 37
ActorGateway (org.apache.flink.runtime.instance.ActorGateway): 30
JobGraph (org.apache.flink.runtime.jobgraph.JobGraph): 27
ActorRef (akka.actor.ActorRef): 25
Deadline (scala.concurrent.duration.Deadline): 24
JobID (org.apache.flink.api.common.JobID): 19
JobVertex (org.apache.flink.runtime.jobgraph.JobVertex): 19
JobManagerMessages (org.apache.flink.runtime.messages.JobManagerMessages): 17
TestingJobManagerMessages (org.apache.flink.runtime.testingUtils.TestingJobManagerMessages): 13
ActorSystem (akka.actor.ActorSystem): 12
JavaTestKit (akka.testkit.JavaTestKit): 11
Timeout (akka.util.Timeout): 11
File (java.io.File): 11
TimeoutException (java.util.concurrent.TimeoutException): 11
AkkaActorGateway (org.apache.flink.runtime.instance.AkkaActorGateway): 11
JobVertexID (org.apache.flink.runtime.jobgraph.JobVertexID): 11
Props (akka.actor.Props): 10
IOException (java.io.IOException): 10