
Example 16 with ClusterClient

use of org.apache.flink.client.program.ClusterClient in project flink by apache.

the class CliFrontend method run.

// --------------------------------------------------------------------------------------------
//  Execute Actions
// --------------------------------------------------------------------------------------------
/**
	 * Executes the run action.
	 * 
	 * @param args Command line arguments for the run action.
	 */
protected int run(String[] args) {
    LOG.info("Running 'run' command.");
    RunOptions options;
    try {
        options = CliFrontendParser.parseRunCommand(args);
    } catch (CliArgsException e) {
        return handleArgException(e);
    } catch (Throwable t) {
        return handleError(t);
    }
    // evaluate help flag
    if (options.isPrintHelp()) {
        CliFrontendParser.printHelpForRun();
        return 0;
    }
    if (options.getJarFilePath() == null) {
        return handleArgException(new CliArgsException("The program JAR file was not specified."));
    }
    PackagedProgram program;
    try {
        LOG.info("Building program from JAR file");
        program = buildProgram(options);
    } catch (FileNotFoundException e) {
        return handleArgException(e);
    } catch (Throwable t) {
        return handleError(t);
    }
    ClusterClient client = null;
    try {
        client = createClient(options, program);
        client.setPrintStatusDuringExecution(options.getStdoutLogging());
        client.setDetached(options.getDetachedMode());
        LOG.debug("Client slots is set to {}", client.getMaxSlots());
        LOG.debug(options.getSavepointRestoreSettings().toString());
        int userParallelism = options.getParallelism();
        LOG.debug("User parallelism is set to {}", userParallelism);
        if (client.getMaxSlots() != -1 && userParallelism == -1) {
            logAndSysout("Using the parallelism provided by the remote cluster (" + client.getMaxSlots() + "). " + "To use another parallelism, set it at the ./bin/flink client.");
            userParallelism = client.getMaxSlots();
        }
        return executeProgram(program, client, userParallelism);
    } catch (Throwable t) {
        return handleError(t);
    } finally {
        if (client != null) {
            client.shutdown();
        }
        if (program != null) {
            program.deleteExtractedLibraries();
        }
    }
}
Also used : PackagedProgram(org.apache.flink.client.program.PackagedProgram) ClusterClient(org.apache.flink.client.program.ClusterClient) FileNotFoundException(java.io.FileNotFoundException) CliArgsException(org.apache.flink.client.cli.CliArgsException) RunOptions(org.apache.flink.client.cli.RunOptions) TriggerSavepoint(org.apache.flink.runtime.messages.JobManagerMessages.TriggerSavepoint) DisposeSavepoint(org.apache.flink.runtime.messages.JobManagerMessages.DisposeSavepoint) CancelJobWithSavepoint(org.apache.flink.runtime.messages.JobManagerMessages.CancelJobWithSavepoint)
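
Outside the CLI wrapper, the same client calls can be combined directly. A minimal sketch, assuming the pre-1.5 client API used in the example and a standalone cluster described by the loaded flink-conf.yaml; the JAR path and the parallelism fallback are placeholders, and ClusterClient.run(program, parallelism) stands in for the executeProgram helper, which is not shown in the snippet:

import java.io.File;

import org.apache.flink.client.program.ClusterClient;
import org.apache.flink.client.program.PackagedProgram;
import org.apache.flink.client.program.StandaloneClusterClient;
import org.apache.flink.configuration.GlobalConfiguration;

public class RunProgramSketch {

    public static void main(String[] args) throws Exception {
        // Build the program from a user JAR (placeholder path).
        PackagedProgram program = new PackagedProgram(new File("/path/to/job.jar"));

        // Connect to the standalone cluster described by the loaded configuration.
        ClusterClient client = new StandaloneClusterClient(GlobalConfiguration.loadConfiguration());
        client.setPrintStatusDuringExecution(true); // mirrors options.getStdoutLogging() above
        client.setDetached(false);                  // attached (blocking) submission

        try {
            // Fall back to the cluster's slot count when no parallelism is given,
            // as the CLI does above.
            int parallelism = client.getMaxSlots() != -1 ? client.getMaxSlots() : 1;
            client.run(program, parallelism);
        } finally {
            client.shutdown();
            program.deleteExtractedLibraries();
        }
    }
}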

Example 17 with ClusterClient

use of org.apache.flink.client.program.ClusterClient in project flink by apache.

the class CliFrontendAddressConfigurationTest method testValidConfig.

@Test
public void testValidConfig() {
    try {
        CliFrontend frontend = new CliFrontend(CliFrontendTestUtils.getConfigDir());
        RunOptions options = CliFrontendParser.parseRunCommand(new String[] {});
        ClusterClient clusterClient = frontend.retrieveClient(options);
        checkJobManagerAddress(clusterClient.getFlinkConfiguration(), CliFrontendTestUtils.TEST_JOB_MANAGER_ADDRESS, CliFrontendTestUtils.TEST_JOB_MANAGER_PORT);
    } catch (Exception e) {
        e.printStackTrace();
        fail(e.getMessage());
    }
}
Also used : ClusterClient(org.apache.flink.client.program.ClusterClient) RunOptions(org.apache.flink.client.cli.RunOptions) IllegalConfigurationException(org.apache.flink.configuration.IllegalConfigurationException) Test(org.junit.Test)
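
The helper checkJobManagerAddress is not part of the snippet; a minimal sketch of what such a check can look like, assuming JUnit 4 and the ConfigConstants keys used in the next example. The method body is a hypothetical stand-in, not the test utility's actual implementation:

import static org.junit.Assert.assertEquals;

import org.apache.flink.configuration.ConfigConstants;
import org.apache.flink.configuration.Configuration;

public class JobManagerAddressCheck {

    /**
     * Hypothetical stand-in for checkJobManagerAddress: asserts that the client's
     * Flink configuration carries the expected JobManager host and port.
     */
    static void checkJobManagerAddress(Configuration config, String expectedHost, int expectedPort) {
        String actualHost = config.getString(ConfigConstants.JOB_MANAGER_IPC_ADDRESS_KEY, null);
        int actualPort = config.getInteger(ConfigConstants.JOB_MANAGER_IPC_PORT_KEY, -1);

        assertEquals(expectedHost, actualHost);
        assertEquals(expectedPort, actualPort);
    }
}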

Example 18 with ClusterClient

use of org.apache.flink.client.program.ClusterClient in project flink by apache.

the class FlinkClient method killTopologyWithOpts.

public void killTopologyWithOpts(final String name, final KillOptions options) throws NotAliveException {
    final JobID jobId = this.getTopologyJobId(name);
    if (jobId == null) {
        throw new NotAliveException("Storm topology with name " + name + " not found.");
    }
    if (options != null) {
        try {
            Thread.sleep(1000 * options.get_wait_secs());
        } catch (final InterruptedException e) {
            throw new RuntimeException(e);
        }
    }
    final Configuration configuration = GlobalConfiguration.loadConfiguration();
    configuration.setString(ConfigConstants.JOB_MANAGER_IPC_ADDRESS_KEY, this.jobManagerHost);
    configuration.setInteger(ConfigConstants.JOB_MANAGER_IPC_PORT_KEY, this.jobManagerPort);
    final ClusterClient client;
    try {
        client = new StandaloneClusterClient(configuration);
    } catch (final IOException e) {
        throw new RuntimeException("Could not establish a connection to the job manager", e);
    }
    try {
        client.stop(jobId);
    } catch (final Exception e) {
        throw new RuntimeException("Cannot stop job.", e);
    }
}
Also used : StandaloneClusterClient(org.apache.flink.client.program.StandaloneClusterClient) ClusterClient(org.apache.flink.client.program.ClusterClient) NotAliveException(org.apache.storm.generated.NotAliveException) Configuration(org.apache.flink.configuration.Configuration) GlobalConfiguration(org.apache.flink.configuration.GlobalConfiguration) StandaloneClusterClient(org.apache.flink.client.program.StandaloneClusterClient) IOException(java.io.IOException) JobID(org.apache.flink.api.common.JobID) ProgramInvocationException(org.apache.flink.client.program.ProgramInvocationException) AlreadyAliveException(org.apache.storm.generated.AlreadyAliveException) IOException(java.io.IOException) NotAliveException(org.apache.storm.generated.NotAliveException) InvalidTopologyException(org.apache.storm.generated.InvalidTopologyException)
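
For a plain Flink job rather than a Storm topology, the connection and stop calls reduce to a few lines. A minimal sketch, assuming the same client version as the example above; the host, port, and command-line job ID are placeholders:

import org.apache.flink.api.common.JobID;
import org.apache.flink.client.program.ClusterClient;
import org.apache.flink.client.program.StandaloneClusterClient;
import org.apache.flink.configuration.ConfigConstants;
import org.apache.flink.configuration.Configuration;

public class StopJobSketch {

    public static void main(String[] args) throws Exception {
        // Point the client at a specific JobManager, as the Storm compatibility client does.
        Configuration configuration = new Configuration();
        configuration.setString(ConfigConstants.JOB_MANAGER_IPC_ADDRESS_KEY, "localhost"); // placeholder host
        configuration.setInteger(ConfigConstants.JOB_MANAGER_IPC_PORT_KEY, 6123);          // placeholder port

        ClusterClient client = new StandaloneClusterClient(configuration);
        try {
            // The job ID is expected as the first command-line argument.
            JobID jobId = JobID.fromHexString(args[0]);
            // Sends a stop signal; only jobs with stoppable sources accept it,
            // otherwise client.cancel(jobId) is the alternative.
            client.stop(jobId);
        } finally {
            client.shutdown();
        }
    }
}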

Example 19 with ClusterClient

use of org.apache.flink.client.program.ClusterClient in project flink by apache.

the class JobRetrievalITCase method testJobRetrieval.

@Test
public void testJobRetrieval() throws Exception {
    final JobID jobID = new JobID();
    final JobVertex imalock = new JobVertex("imalock");
    imalock.setInvokableClass(SemaphoreInvokable.class);
    final JobGraph jobGraph = new JobGraph(jobID, "testjob", imalock);
    final ClusterClient client = new StandaloneClusterClient(cluster.configuration());
    // acquire the lock to make sure that the job cannot complete until the job client
    // has been attached in resumingThread
    lock.acquire();
    client.runDetached(jobGraph, JobRetrievalITCase.class.getClassLoader());
    final Thread resumingThread = new Thread(new Runnable() {

        @Override
        public void run() {
            try {
                assertNotNull(client.retrieveJob(jobID));
            } catch (Throwable e) {
                fail(e.getMessage());
            }
        }
    });
    final Seq<ActorSystem> actorSystemSeq = cluster.jobManagerActorSystems().get();
    final ActorSystem actorSystem = actorSystemSeq.last();
    JavaTestKit testkit = new JavaTestKit(actorSystem);
    final ActorRef jm = cluster.getJobManagersAsJava().get(0);
    // wait until client connects
    jm.tell(TestingJobManagerMessages.getNotifyWhenClientConnects(), testkit.getRef());
    // confirm registration
    testkit.expectMsgEquals(true);
    // kick off resuming
    resumingThread.start();
    // wait for client to connect
    testkit.expectMsgAllOf(TestingJobManagerMessages.getClientConnected(), TestingJobManagerMessages.getClassLoadingPropsDelivered());
    // client has connected, we can release the lock
    lock.release();
    resumingThread.join();
}
Also used : ActorSystem(akka.actor.ActorSystem) ActorRef(akka.actor.ActorRef) StandaloneClusterClient(org.apache.flink.client.program.StandaloneClusterClient) JobGraph(org.apache.flink.runtime.jobgraph.JobGraph) StandaloneClusterClient(org.apache.flink.client.program.StandaloneClusterClient) ClusterClient(org.apache.flink.client.program.ClusterClient) JobVertex(org.apache.flink.runtime.jobgraph.JobVertex) JobID(org.apache.flink.api.common.JobID) JavaTestKit(akka.testkit.JavaTestKit) Test(org.junit.Test)
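
Stripped of the Akka test plumbing, the detach-and-reattach pattern the test exercises looks roughly like this. A minimal sketch, assuming the same pre-1.5 ClusterClient API and a job graph built elsewhere (e.g. from a user program):

import org.apache.flink.api.common.JobID;
import org.apache.flink.client.program.ClusterClient;
import org.apache.flink.client.program.StandaloneClusterClient;
import org.apache.flink.configuration.GlobalConfiguration;
import org.apache.flink.runtime.jobgraph.JobGraph;

public class DetachAndReattachSketch {

    /**
     * Submits a job graph without waiting for it and later reattaches by job ID.
     */
    static void detachAndReattach(JobGraph jobGraph) throws Exception {
        ClusterClient client = new StandaloneClusterClient(GlobalConfiguration.loadConfiguration());
        try {
            JobID jobId = jobGraph.getJobID();

            // Fire-and-forget submission; the call returns once the job is accepted.
            client.runDetached(jobGraph, DetachAndReattachSketch.class.getClassLoader());

            // Later, possibly from a different client, block until the job reaches a terminal state.
            client.retrieveJob(jobId);
        } finally {
            client.shutdown();
        }
    }
}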

Example 20 with ClusterClient

use of org.apache.flink.client.program.ClusterClient in project flink by apache.

the class AbstractQueryableStateTestBase method testDuplicateRegistrationFailsJob.

/**
 * Tests that duplicate query registrations fail the job at the JobManager.
 */
@Test(timeout = 60_000)
public void testDuplicateRegistrationFailsJob() throws Exception {
    final int numKeys = 256;
    StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
    env.setStateBackend(stateBackend);
    env.setParallelism(maxParallelism);
    // Very important, because cluster is shared between tests and we
    // don't explicitly check that all slots are available before
    // submitting.
    env.setRestartStrategy(RestartStrategies.fixedDelayRestart(Integer.MAX_VALUE, 1000L));
    DataStream<Tuple2<Integer, Long>> source = env.addSource(new TestKeyRangeSource(numKeys));
    // Reducing state
    ReducingStateDescriptor<Tuple2<Integer, Long>> reducingState = new ReducingStateDescriptor<>("any-name", new SumReduce(), source.getType());
    final String queryName = "duplicate-me";
    final QueryableStateStream<Integer, Tuple2<Integer, Long>> queryableState = source.keyBy(new KeySelector<Tuple2<Integer, Long>, Integer>() {

        private static final long serialVersionUID = -4126824763829132959L;

        @Override
        public Integer getKey(Tuple2<Integer, Long> value) {
            return value.f0;
        }
    }).asQueryableState(queryName, reducingState);
    final QueryableStateStream<Integer, Tuple2<Integer, Long>> duplicate = source.keyBy(new KeySelector<Tuple2<Integer, Long>, Integer>() {

        private static final long serialVersionUID = -6265024000462809436L;

        @Override
        public Integer getKey(Tuple2<Integer, Long> value) {
            return value.f0;
        }
    }).asQueryableState(queryName);
    // Submit the job graph
    final JobGraph jobGraph = env.getStreamGraph().getJobGraph();
    clusterClient.submitJob(jobGraph).thenCompose(clusterClient::requestJobResult).thenApply(JobResult::getSerializedThrowable).thenAccept(serializedThrowable -> {
        assertTrue(serializedThrowable.isPresent());
        final Throwable t = serializedThrowable.get().deserializeError(getClass().getClassLoader());
        final String failureCause = ExceptionUtils.stringifyException(t);
        assertThat(failureCause, containsString("KvState with name '" + queryName + "' has already been registered by another operator"));
    }).get();
}
Also used : Deadline(org.apache.flink.api.common.time.Deadline) Arrays(java.util.Arrays) ReducingStateDescriptor(org.apache.flink.api.common.state.ReducingStateDescriptor) Tuple2(org.apache.flink.api.java.tuple.Tuple2) JobGraph(org.apache.flink.runtime.jobgraph.JobGraph) ClassLoaderUtils(org.apache.flink.testutils.ClassLoaderUtils) ExceptionUtils(org.apache.flink.util.ExceptionUtils) BasicTypeInfo(org.apache.flink.api.common.typeinfo.BasicTypeInfo) Assert.assertThat(org.junit.Assert.assertThat) ListState(org.apache.flink.api.common.state.ListState) AggregateFunction(org.apache.flink.api.common.functions.AggregateFunction) StateBackend(org.apache.flink.runtime.state.StateBackend) URLClassLoader(java.net.URLClassLoader) AggregatingState(org.apache.flink.api.common.state.AggregatingState) CheckpointListener(org.apache.flink.api.common.state.CheckpointListener) ReducingState(org.apache.flink.api.common.state.ReducingState) QueryableStateStream(org.apache.flink.streaming.api.datastream.QueryableStateStream) Duration(java.time.Duration) Map(java.util.Map) TestLogger(org.apache.flink.util.TestLogger) ListStateDescriptor(org.apache.flink.api.common.state.ListStateDescriptor) Assert.fail(org.junit.Assert.fail) TypeInformation(org.apache.flink.api.common.typeinfo.TypeInformation) ClassRule(org.junit.ClassRule) State(org.apache.flink.api.common.state.State) KeySelector(org.apache.flink.api.java.functions.KeySelector) ScheduledExecutor(org.apache.flink.util.concurrent.ScheduledExecutor) CoreMatchers.containsString(org.hamcrest.CoreMatchers.containsString) CancellationException(java.util.concurrent.CancellationException) Set(java.util.Set) CompletionException(java.util.concurrent.CompletionException) Preconditions(org.apache.flink.util.Preconditions) Executors(java.util.concurrent.Executors) Serializable(java.io.Serializable) TestingUtils(org.apache.flink.testutils.TestingUtils) VoidNamespaceSerializer(org.apache.flink.queryablestate.client.VoidNamespaceSerializer) List(java.util.List) ValueState(org.apache.flink.api.common.state.ValueState) ClusterClient(org.apache.flink.client.program.ClusterClient) OneInputStreamOperator(org.apache.flink.streaming.api.operators.OneInputStreamOperator) VoidNamespace(org.apache.flink.queryablestate.client.VoidNamespace) Time(org.apache.flink.api.common.time.Time) StreamExecutionEnvironment(org.apache.flink.streaming.api.environment.StreamExecutionEnvironment) AtomicLongArray(java.util.concurrent.atomic.AtomicLongArray) ScheduledExecutorServiceAdapter(org.apache.flink.util.concurrent.ScheduledExecutorServiceAdapter) HashMap(java.util.HashMap) CompletableFuture(java.util.concurrent.CompletableFuture) JobStatus(org.apache.flink.api.common.JobStatus) RestartStrategies(org.apache.flink.api.common.restartstrategy.RestartStrategies) MapStateDescriptor(org.apache.flink.api.common.state.MapStateDescriptor) ArrayList(java.util.ArrayList) HashSet(java.util.HashSet) JobResult(org.apache.flink.runtime.jobmaster.JobResult) StreamRecord(org.apache.flink.streaming.runtime.streamrecord.StreamRecord) FutureUtils(org.apache.flink.util.concurrent.FutureUtils) Collector(org.apache.flink.util.Collector) ThreadLocalRandom(java.util.concurrent.ThreadLocalRandom) RichParallelSourceFunction(org.apache.flink.streaming.api.functions.source.RichParallelSourceFunction) ScheduledExecutorService(java.util.concurrent.ScheduledExecutorService) ProcessFunction(org.apache.flink.streaming.api.functions.ProcessFunction) ReduceFunction(org.apache.flink.api.common.functions.ReduceFunction) AggregatingStateDescriptor(org.apache.flink.api.common.state.AggregatingStateDescriptor) Before(org.junit.Before) Serializer(com.esotericsoftware.kryo.Serializer) StateDescriptor(org.apache.flink.api.common.state.StateDescriptor) GenericTypeInfo(org.apache.flink.api.java.typeutils.GenericTypeInfo) ValueStateDescriptor(org.apache.flink.api.common.state.ValueStateDescriptor) Configuration(org.apache.flink.configuration.Configuration) Assert.assertTrue(org.junit.Assert.assertTrue) Test(org.junit.Test) IOException(java.io.IOException) UnknownKeyOrNamespaceException(org.apache.flink.queryablestate.exceptions.UnknownKeyOrNamespaceException) AbstractStreamOperator(org.apache.flink.streaming.api.operators.AbstractStreamOperator) DataStream(org.apache.flink.streaming.api.datastream.DataStream) ExecutionException(java.util.concurrent.ExecutionException) TimeUnit(java.util.concurrent.TimeUnit) AtomicLong(java.util.concurrent.atomic.AtomicLong) JobID(org.apache.flink.api.common.JobID) Ignore(org.junit.Ignore) MapState(org.apache.flink.api.common.state.MapState) Assert(org.junit.Assert) QueryableStateClient(org.apache.flink.queryablestate.client.QueryableStateClient) TemporaryFolder(org.junit.rules.TemporaryFolder) Assert.assertEquals(org.junit.Assert.assertEquals) ReducingStateDescriptor(org.apache.flink.api.common.state.ReducingStateDescriptor) JobResult(org.apache.flink.runtime.jobmaster.JobResult) CoreMatchers.containsString(org.hamcrest.CoreMatchers.containsString) KeySelector(org.apache.flink.api.java.functions.KeySelector) JobGraph(org.apache.flink.runtime.jobgraph.JobGraph) Tuple2(org.apache.flink.api.java.tuple.Tuple2) AtomicLong(java.util.concurrent.atomic.AtomicLong) StreamExecutionEnvironment(org.apache.flink.streaming.api.environment.StreamExecutionEnvironment) Test(org.junit.Test)
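
The asynchronous submission chain can be reused outside the test. A minimal sketch, assuming the newer ClusterClient interface from this example, where submitJob and requestJobResult return CompletableFutures; the exception wrapping at the end is illustrative, not part of the original test:

import java.util.concurrent.CompletableFuture;

import org.apache.flink.client.program.ClusterClient;
import org.apache.flink.runtime.jobgraph.JobGraph;
import org.apache.flink.runtime.jobmaster.JobResult;

public class AsyncSubmitSketch {

    /**
     * Submits a job graph and blocks until its terminal result, surfacing the
     * failure cause if the job did not succeed. Mirrors the future chain above.
     */
    static void submitAndWait(ClusterClient<?> clusterClient, JobGraph jobGraph) throws Exception {
        CompletableFuture<JobResult> resultFuture =
                clusterClient.submitJob(jobGraph)
                        .thenCompose(clusterClient::requestJobResult);

        JobResult result = resultFuture.get();
        if (!result.isSuccess()) {
            // Deserialize the recorded failure against this class loader, as the test does.
            Throwable cause = result.getSerializedThrowable()
                    .map(t -> t.deserializeError(AsyncSubmitSketch.class.getClassLoader()))
                    .orElse(new RuntimeException("Job failed without a recorded cause"));
            throw new RuntimeException("Job " + result.getJobId() + " failed", cause);
        }
    }
}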

Aggregations

ClusterClient (org.apache.flink.client.program.ClusterClient) 22
Configuration (org.apache.flink.configuration.Configuration) 14
Test (org.junit.Test) 14
JobGraph (org.apache.flink.runtime.jobgraph.JobGraph) 10
IOException (java.io.IOException) 7
JobID (org.apache.flink.api.common.JobID) 7
Arrays (java.util.Arrays) 6
CompletableFuture (java.util.concurrent.CompletableFuture) 6
ValueState (org.apache.flink.api.common.state.ValueState) 6
ValueStateDescriptor (org.apache.flink.api.common.state.ValueStateDescriptor) 6
StandaloneClusterClient (org.apache.flink.client.program.StandaloneClusterClient) 6
StreamExecutionEnvironment (org.apache.flink.streaming.api.environment.StreamExecutionEnvironment) 6
List (java.util.List) 5
ListState (org.apache.flink.api.common.state.ListState) 5
ListStateDescriptor (org.apache.flink.api.common.state.ListStateDescriptor) 5
DataStream (org.apache.flink.streaming.api.datastream.DataStream) 5
Collector (org.apache.flink.util.Collector) 5
File (java.io.File) 4
FileNotFoundException (java.io.FileNotFoundException) 4
Collection (java.util.Collection) 4