Example 6 with Configuration

use of org.apache.flink.configuration.Configuration in project flink by apache.

the class WebRuntimeMonitorITCase method testStandaloneWebRuntimeMonitor.

/**
 * Tests the monitor in standalone operation.
 */
@Test
public void testStandaloneWebRuntimeMonitor() throws Exception {
    final Deadline deadline = TestTimeout.fromNow();
    TestingCluster flink = null;
    WebRuntimeMonitor webMonitor = null;
    try {
        // Flink w/o a web monitor
        flink = new TestingCluster(new Configuration());
        flink.start(true);
        webMonitor = startWebRuntimeMonitor(flink);
        try (HttpTestClient client = new HttpTestClient("localhost", webMonitor.getServerPort())) {
            String expected = new Scanner(new File(MAIN_RESOURCES_PATH + "/index.html")).useDelimiter("\\A").next();
            // Request the file from the web server
            client.sendGetRequest("index.html", deadline.timeLeft());
            HttpTestClient.SimpleHttpResponse response = client.getNextResponse(deadline.timeLeft());
            assertEquals(HttpResponseStatus.OK, response.getStatus());
            assertEquals(response.getType(), MimeTypes.getMimeTypeForExtension("html"));
            assertEquals(expected, response.getContent());
            // Simple overview request
            client.sendGetRequest("/overview", deadline.timeLeft());
            response = client.getNextResponse(deadline.timeLeft());
            assertEquals(HttpResponseStatus.OK, response.getStatus());
            assertEquals(response.getType(), MimeTypes.getMimeTypeForExtension("json"));
            assertTrue(response.getContent().contains("\"taskmanagers\":1"));
        }
    } finally {
        if (flink != null) {
            flink.shutdown();
        }
        if (webMonitor != null) {
            webMonitor.stop();
        }
    }
}
Also used : Scanner(java.util.Scanner) HttpTestClient(org.apache.flink.runtime.webmonitor.testutils.HttpTestClient) TestingCluster(org.apache.flink.runtime.testingUtils.TestingCluster) Configuration(org.apache.flink.configuration.Configuration) Deadline(scala.concurrent.duration.Deadline) File(java.io.File) Test(org.junit.Test)
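
Outside the HttpTestClient harness, the same check can be reproduced with nothing but the JDK. The sketch below is illustrative only (the class name, the hard-coded port, and the plain HttpURLConnection call are assumptions, not part of the test): it issues a GET against the monitor's /overview endpoint and prints the status, content type, and body.

import java.io.InputStream;
import java.net.HttpURLConnection;
import java.net.URL;
import java.util.Scanner;

public class OverviewSmokeCheck {
    public static void main(String[] args) throws Exception {
        int port = 8081; // placeholder; the test above asks the monitor for its actual port
        URL url = new URL("http://localhost:" + port + "/overview");
        HttpURLConnection connection = (HttpURLConnection) url.openConnection();
        try (InputStream in = connection.getInputStream();
             Scanner scanner = new Scanner(in, "UTF-8").useDelimiter("\\A")) {
            String body = scanner.hasNext() ? scanner.next() : "";
            System.out.println(connection.getResponseCode() + " " + connection.getContentType());
            // for a cluster with a single TaskManager the body contains "taskmanagers":1
            System.out.println(body);
        }
    }
}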

Example 7 with Configuration

use of org.apache.flink.configuration.Configuration in project flink by apache.

the class WebRuntimeMonitorITCase method startWebRuntimeMonitor.

private WebRuntimeMonitor startWebRuntimeMonitor(TestingCluster flink) throws Exception {
    ActorSystem jmActorSystem = flink.jobManagerActorSystems().get().head();
    ActorRef jmActor = flink.jobManagerActors().get().head();
    // Needs to match the leader address from the leader retrieval service
    String jobManagerAddress = AkkaUtils.getAkkaURL(jmActorSystem, jmActor);
    File logDir = temporaryFolder.newFolder("log");
    Path logFile = Files.createFile(new File(logDir, "jobmanager.log").toPath());
    Files.createFile(new File(logDir, "jobmanager.out").toPath());
    // Web frontend on random port
    Configuration config = new Configuration();
    config.setInteger(ConfigConstants.JOB_MANAGER_WEB_PORT_KEY, 0);
    config.setString(ConfigConstants.JOB_MANAGER_WEB_LOG_PATH_KEY, logFile.toString());
    WebRuntimeMonitor webMonitor = new WebRuntimeMonitor(config, flink.createLeaderRetrievalService(), jmActorSystem);
    webMonitor.start(jobManagerAddress);
    flink.waitForActorsToBeAlive();
    return webMonitor;
}
Also used : ActorSystem(akka.actor.ActorSystem) Path(java.nio.file.Path) Configuration(org.apache.flink.configuration.Configuration) ActorRef(akka.actor.ActorRef) File(java.io.File)
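
Stripped of the cluster plumbing, the Configuration pattern this example relies on is small. The sketch below is a hedged restatement of it (the helper class and method names are made up for illustration): port 0 asks the web frontend to bind any free port, and the log-path key points the monitor at a real log file so the log-serving handlers have something to return.

import org.apache.flink.configuration.ConfigConstants;
import org.apache.flink.configuration.Configuration;

class WebMonitorConfigSketch {

    static Configuration randomPortWebConfig(String logFilePath) {
        Configuration config = new Configuration();
        // 0 means "bind any free port", which keeps parallel test runs from colliding
        config.setInteger(ConfigConstants.JOB_MANAGER_WEB_PORT_KEY, 0);
        // path of the job manager log file the web frontend should serve
        config.setString(ConfigConstants.JOB_MANAGER_WEB_LOG_PATH_KEY, logFilePath);
        return config;
    }
}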

Example 8 with Configuration

use of org.apache.flink.configuration.Configuration in project flink by apache.

the class JobManagerHARecoveryTest method testJobRecoveryWhenLosingLeadership.

/**
 * Tests that the persisted job is not removed from the SubmittedJobGraphStore if the JobManager
 * loses its leadership. Furthermore, it tests that the JobManager can recover the job from the
 * SubmittedJobGraphStore and that its checkpoint state is recovered as well.
 */
@Test
public void testJobRecoveryWhenLosingLeadership() throws Exception {
    FiniteDuration timeout = new FiniteDuration(30, TimeUnit.SECONDS);
    FiniteDuration jobRecoveryTimeout = new FiniteDuration(3, TimeUnit.SECONDS);
    Deadline deadline = new FiniteDuration(2, TimeUnit.MINUTES).fromNow();
    Configuration flinkConfiguration = new Configuration();
    UUID leaderSessionID = UUID.randomUUID();
    UUID newLeaderSessionID = UUID.randomUUID();
    int slots = 2;
    ActorRef archive = null;
    ActorRef jobManager = null;
    ActorRef taskManager = null;
    flinkConfiguration.setString(HighAvailabilityOptions.HA_MODE, "zookeeper");
    flinkConfiguration.setString(HighAvailabilityOptions.HA_STORAGE_PATH, temporaryFolder.newFolder().toString());
    flinkConfiguration.setInteger(ConfigConstants.TASK_MANAGER_NUM_TASK_SLOTS, slots);
    try {
        Scheduler scheduler = new Scheduler(TestingUtils.defaultExecutionContext());
        MySubmittedJobGraphStore mySubmittedJobGraphStore = new MySubmittedJobGraphStore();
        MyCheckpointStore checkpointStore = new MyCheckpointStore();
        CheckpointIDCounter checkpointCounter = new StandaloneCheckpointIDCounter();
        CheckpointRecoveryFactory checkpointStateFactory = new MyCheckpointRecoveryFactory(checkpointStore, checkpointCounter);
        TestingLeaderElectionService myLeaderElectionService = new TestingLeaderElectionService();
        TestingLeaderRetrievalService myLeaderRetrievalService = new TestingLeaderRetrievalService();
        InstanceManager instanceManager = new InstanceManager();
        instanceManager.addInstanceListener(scheduler);
        archive = system.actorOf(Props.create(MemoryArchivist.class, 10));
        Props jobManagerProps = Props.create(
            TestingJobManager.class,
            flinkConfiguration,
            TestingUtils.defaultExecutor(),
            TestingUtils.defaultExecutor(),
            instanceManager,
            scheduler,
            new BlobLibraryCacheManager(new BlobServer(flinkConfiguration), 3600000),
            archive,
            new FixedDelayRestartStrategy.FixedDelayRestartStrategyFactory(Int.MaxValue(), 100),
            timeout,
            myLeaderElectionService,
            mySubmittedJobGraphStore,
            checkpointStateFactory,
            jobRecoveryTimeout,
            Option.apply(null));
        jobManager = system.actorOf(jobManagerProps);
        ActorGateway gateway = new AkkaActorGateway(jobManager, leaderSessionID);
        taskManager = TaskManager.startTaskManagerComponentsAndActor(flinkConfiguration, ResourceID.generate(), system, "localhost", Option.apply("taskmanager"), Option.apply((LeaderRetrievalService) myLeaderRetrievalService), true, TestingTaskManager.class);
        ActorGateway tmGateway = new AkkaActorGateway(taskManager, leaderSessionID);
        Future<Object> tmAlive = tmGateway.ask(TestingMessages.getAlive(), deadline.timeLeft());
        Await.ready(tmAlive, deadline.timeLeft());
        JobVertex sourceJobVertex = new JobVertex("Source");
        sourceJobVertex.setInvokableClass(BlockingStatefulInvokable.class);
        sourceJobVertex.setParallelism(slots);
        JobGraph jobGraph = new JobGraph("TestingJob", sourceJobVertex);
        List<JobVertexID> vertexId = Collections.singletonList(sourceJobVertex.getID());
        jobGraph.setSnapshotSettings(new JobSnapshottingSettings(vertexId, vertexId, vertexId, 100, 10 * 60 * 1000, 0, 1, ExternalizedCheckpointSettings.none(), null, true));
        BlockingStatefulInvokable.initializeStaticHelpers(slots);
        Future<Object> isLeader = gateway.ask(TestingJobManagerMessages.getNotifyWhenLeader(), deadline.timeLeft());
        Future<Object> isConnectedToJobManager = tmGateway.ask(new TestingTaskManagerMessages.NotifyWhenRegisteredAtJobManager(jobManager), deadline.timeLeft());
        // tell the JobManager that it is the leader
        myLeaderElectionService.isLeader(leaderSessionID);
        // tell the TaskManager who the leader is
        myLeaderRetrievalService.notifyListener(gateway.path(), leaderSessionID);
        Await.ready(isLeader, deadline.timeLeft());
        Await.ready(isConnectedToJobManager, deadline.timeLeft());
        // submit blocking job
        Future<Object> jobSubmitted = gateway.ask(new JobManagerMessages.SubmitJob(jobGraph, ListeningBehaviour.DETACHED), deadline.timeLeft());
        Await.ready(jobSubmitted, deadline.timeLeft());
        // Wait for some checkpoints to complete
        BlockingStatefulInvokable.awaitCompletedCheckpoints();
        Future<Object> jobRemoved = gateway.ask(new TestingJobManagerMessages.NotifyWhenJobRemoved(jobGraph.getJobID()), deadline.timeLeft());
        // Revoke leadership
        myLeaderElectionService.notLeader();
        // check that the job gets removed from the JobManager
        Await.ready(jobRemoved, deadline.timeLeft());
        // but stays in the submitted job graph store
        assertTrue(mySubmittedJobGraphStore.contains(jobGraph.getJobID()));
        Future<Object> jobRunning = gateway.ask(new TestingJobManagerMessages.NotifyWhenJobStatus(jobGraph.getJobID(), JobStatus.RUNNING), deadline.timeLeft());
        // Make JobManager again a leader
        myLeaderElectionService.isLeader(newLeaderSessionID);
        // tell the TaskManager about it
        myLeaderRetrievalService.notifyListener(gateway.path(), newLeaderSessionID);
        // wait until the job is recovered and reaches the RUNNING state
        Await.ready(jobRunning, deadline.timeLeft());
        Future<Object> jobFinished = gateway.ask(new TestingJobManagerMessages.NotifyWhenJobRemoved(jobGraph.getJobID()), deadline.timeLeft());
        BlockingInvokable.unblock();
        // wait until the job has finished
        Await.ready(jobFinished, deadline.timeLeft());
        // check that the job has been removed from the submitted job graph store
        assertFalse(mySubmittedJobGraphStore.contains(jobGraph.getJobID()));
        // Check that state has been recovered
        long[] recoveredStates = BlockingStatefulInvokable.getRecoveredStates();
        for (long state : recoveredStates) {
            boolean isExpected = state >= BlockingStatefulInvokable.NUM_CHECKPOINTS_TO_COMPLETE;
            assertTrue("Did not recover checkpoint state correctly, expecting >= " + BlockingStatefulInvokable.NUM_CHECKPOINTS_TO_COMPLETE + ", but state was " + state, isExpected);
        }
    } finally {
        if (archive != null) {
            archive.tell(PoisonPill.getInstance(), ActorRef.noSender());
        }
        if (jobManager != null) {
            jobManager.tell(PoisonPill.getInstance(), ActorRef.noSender());
        }
        if (taskManager != null) {
            taskManager.tell(PoisonPill.getInstance(), ActorRef.noSender());
        }
    }
}
Also used : AkkaActorGateway(org.apache.flink.runtime.instance.AkkaActorGateway) BlobLibraryCacheManager(org.apache.flink.runtime.execution.librarycache.BlobLibraryCacheManager) Configuration(org.apache.flink.configuration.Configuration) TestingLeaderRetrievalService(org.apache.flink.runtime.leaderelection.TestingLeaderRetrievalService) FixedDelayRestartStrategy(org.apache.flink.runtime.executiongraph.restart.FixedDelayRestartStrategy) ActorRef(akka.actor.ActorRef) Scheduler(org.apache.flink.runtime.jobmanager.scheduler.Scheduler) InstanceManager(org.apache.flink.runtime.instance.InstanceManager) JobVertexID(org.apache.flink.runtime.jobgraph.JobVertexID) Props(akka.actor.Props) TestingJobManagerMessages(org.apache.flink.runtime.testingUtils.TestingJobManagerMessages) ActorGateway(org.apache.flink.runtime.instance.ActorGateway) AkkaActorGateway(org.apache.flink.runtime.instance.AkkaActorGateway) TestingTaskManager(org.apache.flink.runtime.testingUtils.TestingTaskManager) BlobServer(org.apache.flink.runtime.blob.BlobServer) CheckpointIDCounter(org.apache.flink.runtime.checkpoint.CheckpointIDCounter) StandaloneCheckpointIDCounter(org.apache.flink.runtime.checkpoint.StandaloneCheckpointIDCounter) UUID(java.util.UUID) TestingTaskManagerMessages(org.apache.flink.runtime.testingUtils.TestingTaskManagerMessages) TestingLeaderElectionService(org.apache.flink.runtime.leaderelection.TestingLeaderElectionService) Deadline(scala.concurrent.duration.Deadline) JobSnapshottingSettings(org.apache.flink.runtime.jobgraph.tasks.JobSnapshottingSettings) JobManagerMessages(org.apache.flink.runtime.messages.JobManagerMessages) TestingJobManagerMessages(org.apache.flink.runtime.testingUtils.TestingJobManagerMessages) FiniteDuration(scala.concurrent.duration.FiniteDuration) CheckpointRecoveryFactory(org.apache.flink.runtime.checkpoint.CheckpointRecoveryFactory) CompletedCheckpoint(org.apache.flink.runtime.checkpoint.CompletedCheckpoint) JobGraph(org.apache.flink.runtime.jobgraph.JobGraph) JobVertex(org.apache.flink.runtime.jobgraph.JobVertex) StandaloneCheckpointIDCounter(org.apache.flink.runtime.checkpoint.StandaloneCheckpointIDCounter) Test(org.junit.Test)
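
Most of this test is actor plumbing; the Configuration-specific part is just three settings. A hedged, standalone sketch of that part follows (the class and method names are illustrative, and the storage path argument stands in for the TemporaryFolder the test uses):

import org.apache.flink.configuration.ConfigConstants;
import org.apache.flink.configuration.Configuration;
import org.apache.flink.configuration.HighAvailabilityOptions;

class HaConfigSketch {

    static Configuration zookeeperHaConfig(String haStoragePath, int slotsPerTaskManager) {
        Configuration config = new Configuration();
        // switch on ZooKeeper-based high availability
        config.setString(HighAvailabilityOptions.HA_MODE, "zookeeper");
        // file system path where HA metadata (job graphs, checkpoint pointers) is persisted
        config.setString(HighAvailabilityOptions.HA_STORAGE_PATH, haStoragePath);
        // number of task slots offered by each TaskManager
        config.setInteger(ConfigConstants.TASK_MANAGER_NUM_TASK_SLOTS, slotsPerTaskManager);
        return config;
    }
}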

Example 9 with Configuration

use of org.apache.flink.configuration.Configuration in project flink by apache.

the class JobManagerTest method testKvStateMessages.

/**
 * Tests that the JobManager handles {@link org.apache.flink.runtime.query.KvStateMessage}
 * instances as expected.
 */
@Test
public void testKvStateMessages() throws Exception {
    Deadline deadline = new FiniteDuration(100, TimeUnit.SECONDS).fromNow();
    Configuration config = new Configuration();
    config.setString(ConfigConstants.AKKA_ASK_TIMEOUT, "100ms");
    UUID leaderSessionId = null;
    ActorGateway jobManager = new AkkaActorGateway(JobManager.startJobManagerActors(config, system, TestingUtils.defaultExecutor(), TestingUtils.defaultExecutor(), TestingJobManager.class, MemoryArchivist.class)._1(), leaderSessionId);
    LeaderRetrievalService leaderRetrievalService = new StandaloneLeaderRetrievalService(AkkaUtils.getAkkaURL(system, jobManager.actor()));
    Configuration tmConfig = new Configuration();
    tmConfig.setInteger(ConfigConstants.TASK_MANAGER_MEMORY_SIZE_KEY, 4);
    tmConfig.setInteger(ConfigConstants.TASK_MANAGER_NUM_TASK_SLOTS, 8);
    ActorRef taskManager = TaskManager.startTaskManagerComponentsAndActor(tmConfig, ResourceID.generate(), system, "localhost", scala.Option.<String>empty(), scala.Option.apply(leaderRetrievalService), true, TestingTaskManager.class);
    Future<Object> registrationFuture = jobManager.ask(new NotifyWhenAtLeastNumTaskManagerAreRegistered(1), deadline.timeLeft());
    Await.ready(registrationFuture, deadline.timeLeft());
    //
    // Location lookup
    //
    LookupKvStateLocation lookupNonExistingJob = new LookupKvStateLocation(new JobID(), "any-name");
    Future<KvStateLocation> lookupFuture = jobManager.ask(lookupNonExistingJob, deadline.timeLeft()).mapTo(ClassTag$.MODULE$.<KvStateLocation>apply(KvStateLocation.class));
    try {
        Await.result(lookupFuture, deadline.timeLeft());
        fail("Did not throw expected Exception");
    } catch (IllegalStateException ignored) {
    // Expected
    }
    JobGraph jobGraph = new JobGraph("croissant");
    JobVertex jobVertex1 = new JobVertex("cappuccino");
    jobVertex1.setParallelism(4);
    jobVertex1.setMaxParallelism(16);
    jobVertex1.setInvokableClass(BlockingNoOpInvokable.class);
    JobVertex jobVertex2 = new JobVertex("americano");
    jobVertex2.setParallelism(4);
    jobVertex2.setMaxParallelism(16);
    jobVertex2.setInvokableClass(BlockingNoOpInvokable.class);
    jobGraph.addVertex(jobVertex1);
    jobGraph.addVertex(jobVertex2);
    Future<JobSubmitSuccess> submitFuture = jobManager.ask(new SubmitJob(jobGraph, ListeningBehaviour.DETACHED), deadline.timeLeft()).mapTo(ClassTag$.MODULE$.<JobSubmitSuccess>apply(JobSubmitSuccess.class));
    Await.result(submitFuture, deadline.timeLeft());
    Object lookupUnknownRegistrationName = new LookupKvStateLocation(jobGraph.getJobID(), "unknown");
    lookupFuture = jobManager.ask(lookupUnknownRegistrationName, deadline.timeLeft()).mapTo(ClassTag$.MODULE$.<KvStateLocation>apply(KvStateLocation.class));
    try {
        Await.result(lookupFuture, deadline.timeLeft());
        fail("Did not throw expected Exception");
    } catch (UnknownKvStateLocation ignored) {
    // Expected
    }
    //
    // Registration
    //
    NotifyKvStateRegistered registerNonExistingJob = new NotifyKvStateRegistered(new JobID(), new JobVertexID(), new KeyGroupRange(0, 0), "any-name", new KvStateID(), new KvStateServerAddress(InetAddress.getLocalHost(), 1233));
    jobManager.tell(registerNonExistingJob);
    LookupKvStateLocation lookupAfterRegistration = new LookupKvStateLocation(registerNonExistingJob.getJobId(), registerNonExistingJob.getRegistrationName());
    lookupFuture = jobManager.ask(lookupAfterRegistration, deadline.timeLeft()).mapTo(ClassTag$.MODULE$.<KvStateLocation>apply(KvStateLocation.class));
    try {
        Await.result(lookupFuture, deadline.timeLeft());
        fail("Did not throw expected Exception");
    } catch (IllegalStateException ignored) {
    // Expected
    }
    NotifyKvStateRegistered registerForExistingJob = new NotifyKvStateRegistered(jobGraph.getJobID(), jobVertex1.getID(), new KeyGroupRange(0, 0), "register-me", new KvStateID(), new KvStateServerAddress(InetAddress.getLocalHost(), 1293));
    jobManager.tell(registerForExistingJob);
    lookupAfterRegistration = new LookupKvStateLocation(registerForExistingJob.getJobId(), registerForExistingJob.getRegistrationName());
    lookupFuture = jobManager.ask(lookupAfterRegistration, deadline.timeLeft()).mapTo(ClassTag$.MODULE$.<KvStateLocation>apply(KvStateLocation.class));
    KvStateLocation location = Await.result(lookupFuture, deadline.timeLeft());
    assertNotNull(location);
    assertEquals(jobGraph.getJobID(), location.getJobId());
    assertEquals(jobVertex1.getID(), location.getJobVertexId());
    assertEquals(jobVertex1.getMaxParallelism(), location.getNumKeyGroups());
    assertEquals(1, location.getNumRegisteredKeyGroups());
    KeyGroupRange keyGroupRange = registerForExistingJob.getKeyGroupRange();
    assertEquals(1, keyGroupRange.getNumberOfKeyGroups());
    assertEquals(registerForExistingJob.getKvStateId(), location.getKvStateID(keyGroupRange.getStartKeyGroup()));
    assertEquals(registerForExistingJob.getKvStateServerAddress(), location.getKvStateServerAddress(keyGroupRange.getStartKeyGroup()));
    //
    // Unregistration
    //
    NotifyKvStateUnregistered unregister = new NotifyKvStateUnregistered(registerForExistingJob.getJobId(), registerForExistingJob.getJobVertexId(), registerForExistingJob.getKeyGroupRange(), registerForExistingJob.getRegistrationName());
    jobManager.tell(unregister);
    lookupFuture = jobManager.ask(lookupAfterRegistration, deadline.timeLeft()).mapTo(ClassTag$.MODULE$.<KvStateLocation>apply(KvStateLocation.class));
    try {
        Await.result(lookupFuture, deadline.timeLeft());
        fail("Did not throw expected Exception");
    } catch (UnknownKvStateLocation ignored) {
    // Expected
    }
    //
    // Duplicate registration fails task
    //
    NotifyKvStateRegistered register = new NotifyKvStateRegistered(jobGraph.getJobID(), jobVertex1.getID(), new KeyGroupRange(0, 0), "duplicate-me", new KvStateID(), new KvStateServerAddress(InetAddress.getLocalHost(), 1293));
    NotifyKvStateRegistered duplicate = new NotifyKvStateRegistered(
        jobGraph.getJobID(),
        jobVertex2.getID(),        // <--- different operator, but...
        new KeyGroupRange(0, 0),
        "duplicate-me",            // ...same name
        new KvStateID(),
        new KvStateServerAddress(InetAddress.getLocalHost(), 1293));
    Future<TestingJobManagerMessages.JobStatusIs> failedFuture = jobManager.ask(new NotifyWhenJobStatus(jobGraph.getJobID(), JobStatus.FAILED), deadline.timeLeft()).mapTo(ClassTag$.MODULE$.<JobStatusIs>apply(JobStatusIs.class));
    jobManager.tell(register);
    jobManager.tell(duplicate);
    // Wait for failure
    JobStatusIs jobStatus = Await.result(failedFuture, deadline.timeLeft());
    assertEquals(JobStatus.FAILED, jobStatus.state());
}
Also used : AkkaActorGateway(org.apache.flink.runtime.instance.AkkaActorGateway) Configuration(org.apache.flink.configuration.Configuration) UnknownKvStateLocation(org.apache.flink.runtime.query.UnknownKvStateLocation) ActorRef(akka.actor.ActorRef) JobVertexID(org.apache.flink.runtime.jobgraph.JobVertexID) KeyGroupRange(org.apache.flink.runtime.state.KeyGroupRange) KvStateServerAddress(org.apache.flink.runtime.query.KvStateServerAddress) LookupKvStateLocation(org.apache.flink.runtime.query.KvStateMessage.LookupKvStateLocation) KvStateLocation(org.apache.flink.runtime.query.KvStateLocation) UnknownKvStateLocation(org.apache.flink.runtime.query.UnknownKvStateLocation) ActorGateway(org.apache.flink.runtime.instance.ActorGateway) AkkaActorGateway(org.apache.flink.runtime.instance.AkkaActorGateway) JobSubmitSuccess(org.apache.flink.runtime.messages.JobManagerMessages.JobSubmitSuccess) KvStateID(org.apache.flink.runtime.query.KvStateID) UUID(java.util.UUID) SubmitJob(org.apache.flink.runtime.messages.JobManagerMessages.SubmitJob) NotifyKvStateRegistered(org.apache.flink.runtime.query.KvStateMessage.NotifyKvStateRegistered) NotifyKvStateUnregistered(org.apache.flink.runtime.query.KvStateMessage.NotifyKvStateUnregistered) JobStatusIs(org.apache.flink.runtime.testingUtils.TestingJobManagerMessages.JobStatusIs) Deadline(scala.concurrent.duration.Deadline) FiniteDuration(scala.concurrent.duration.FiniteDuration) JobGraph(org.apache.flink.runtime.jobgraph.JobGraph) JobVertex(org.apache.flink.runtime.jobgraph.JobVertex) StandaloneLeaderRetrievalService(org.apache.flink.runtime.leaderretrieval.StandaloneLeaderRetrievalService) LeaderRetrievalService(org.apache.flink.runtime.leaderretrieval.LeaderRetrievalService) StandaloneLeaderRetrievalService(org.apache.flink.runtime.leaderretrieval.StandaloneLeaderRetrievalService) LookupKvStateLocation(org.apache.flink.runtime.query.KvStateMessage.LookupKvStateLocation) NotifyWhenAtLeastNumTaskManagerAreRegistered(org.apache.flink.runtime.testingUtils.TestingJobManagerMessages.NotifyWhenAtLeastNumTaskManagerAreRegistered) JobID(org.apache.flink.api.common.JobID) NotifyWhenJobStatus(org.apache.flink.runtime.testingUtils.TestingJobManagerMessages.NotifyWhenJobStatus) Test(org.junit.Test)
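
Isolated from the actor and KvState machinery, the Configuration usage in this example boils down to two objects: one that shortens the JobManager's Akka ask timeout so negative lookups fail fast, and one that sizes the TaskManager. A hedged sketch follows (helper class and method names are illustrative):

import org.apache.flink.configuration.ConfigConstants;
import org.apache.flink.configuration.Configuration;

class KvStateTestConfigSketch {

    static Configuration jobManagerConfig() {
        Configuration config = new Configuration();
        // short ask timeout so lookups that are expected to fail do not stall the test
        config.setString(ConfigConstants.AKKA_ASK_TIMEOUT, "100ms");
        return config;
    }

    static Configuration taskManagerConfig() {
        Configuration config = new Configuration();
        // small managed-memory budget (4, presumably megabytes in this Flink version) and 8 task slots
        config.setInteger(ConfigConstants.TASK_MANAGER_MEMORY_SIZE_KEY, 4);
        config.setInteger(ConfigConstants.TASK_MANAGER_NUM_TASK_SLOTS, 8);
        return config;
    }
}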

Example 10 with Configuration

use of org.apache.flink.configuration.Configuration in project flink by apache.

the class JobManagerTest method testNullHostnameGoesToLocalhost.

@Test
public void testNullHostnameGoesToLocalhost() {
    try {
        Tuple2<String, Object> address = new Tuple2<String, Object>(null, 1772);
        Config cfg = AkkaUtils.getAkkaConfig(new Configuration(), new Some<Tuple2<String, Object>>(address));
        String hostname = cfg.getString("akka.remote.netty.tcp.hostname");
        assertTrue(InetAddress.getByName(hostname).isLoopbackAddress());
    } catch (Exception e) {
        e.printStackTrace();
        fail(e.getMessage());
    }
}
Also used : Configuration(org.apache.flink.configuration.Configuration) Tuple2(scala.Tuple2) Config(com.typesafe.config.Config) Test(org.junit.Test)

Aggregations

Configuration (org.apache.flink.configuration.Configuration) 630
Test (org.junit.Test) 452
IOException (java.io.IOException) 137
FileInputSplit (org.apache.flink.core.fs.FileInputSplit) 93
File (java.io.File) 92
JobID (org.apache.flink.api.common.JobID) 74
ExecutionConfig (org.apache.flink.api.common.ExecutionConfig) 68
JobVertex (org.apache.flink.runtime.jobgraph.JobVertex) 49
ActorGateway (org.apache.flink.runtime.instance.ActorGateway) 46
JobGraph (org.apache.flink.runtime.jobgraph.JobGraph) 45
Path (org.apache.flink.core.fs.Path) 44
ActorRef (akka.actor.ActorRef) 43
ArrayList (java.util.ArrayList) 43
Tuple2 (org.apache.flink.api.java.tuple.Tuple2) 39
FiniteDuration (scala.concurrent.duration.FiniteDuration) 38
LocalFlinkMiniCluster (org.apache.flink.runtime.minicluster.LocalFlinkMiniCluster) 36
BeforeClass (org.junit.BeforeClass) 35
AkkaActorGateway (org.apache.flink.runtime.instance.AkkaActorGateway) 33
MetricRegistry (org.apache.flink.runtime.metrics.MetricRegistry) 33
JobVertexID (org.apache.flink.runtime.jobgraph.JobVertexID) 32