Use of org.apache.flink.client.program.ClusterClient in project flink by apache.
From class CliFrontend, method run:
// --------------------------------------------------------------------------------------------
// Execute Actions
// --------------------------------------------------------------------------------------------
/**
 * Executes the run action.
 *
 * @param args Command line arguments for the run action.
 */
protected int run(String[] args) {
    LOG.info("Running 'run' command.");

    RunOptions options;
    try {
        options = CliFrontendParser.parseRunCommand(args);
    } catch (CliArgsException e) {
        return handleArgException(e);
    } catch (Throwable t) {
        return handleError(t);
    }

    // evaluate help flag
    if (options.isPrintHelp()) {
        CliFrontendParser.printHelpForRun();
        return 0;
    }

    if (options.getJarFilePath() == null) {
        return handleArgException(new CliArgsException("The program JAR file was not specified."));
    }

    PackagedProgram program;
    try {
        LOG.info("Building program from JAR file");
        program = buildProgram(options);
    } catch (FileNotFoundException e) {
        return handleArgException(e);
    } catch (Throwable t) {
        return handleError(t);
    }

    ClusterClient client = null;
    try {
        client = createClient(options, program);
        client.setPrintStatusDuringExecution(options.getStdoutLogging());
        client.setDetached(options.getDetachedMode());
        LOG.debug("Client slots is set to {}", client.getMaxSlots());
        LOG.debug(options.getSavepointRestoreSettings().toString());

        int userParallelism = options.getParallelism();
        LOG.debug("User parallelism is set to {}", userParallelism);
        if (client.getMaxSlots() != -1 && userParallelism == -1) {
            logAndSysout("Using the parallelism provided by the remote cluster ("
                + client.getMaxSlots() + "). "
                + "To use another parallelism, set it at the ./bin/flink client.");
            userParallelism = client.getMaxSlots();
        }

        return executeProgram(program, client, userParallelism);
    } catch (Throwable t) {
        return handleError(t);
    } finally {
        if (client != null) {
            client.shutdown();
        }
        if (program != null) {
            program.deleteExtractedLibraries();
        }
    }
}
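For orientation, here is a minimal, self-contained sketch of the ClusterClient lifecycle that run() manages: build a configuration, open a client, set the same flags, and shut the client down in a finally block. The use of StandaloneClusterClient and the localhost:6123 coordinates are assumptions for illustration; only the setter calls and getMaxSlots are taken from the method above.

import org.apache.flink.client.program.ClusterClient;
import org.apache.flink.client.program.StandaloneClusterClient;
import org.apache.flink.configuration.ConfigConstants;
import org.apache.flink.configuration.Configuration;

public class ClusterClientLifecycleSketch {

    public static void main(String[] args) throws Exception {
        // Hypothetical JobManager coordinates; replace with real values.
        Configuration config = new Configuration();
        config.setString(ConfigConstants.JOB_MANAGER_IPC_ADDRESS_KEY, "localhost");
        config.setInteger(ConfigConstants.JOB_MANAGER_IPC_PORT_KEY, 6123);

        ClusterClient client = null;
        try {
            client = new StandaloneClusterClient(config);
            // The same flags CliFrontend#run sets from its parsed options.
            client.setPrintStatusDuringExecution(true);
            client.setDetached(false);
            // -1 means the cluster did not report a slot limit.
            System.out.println("Max slots: " + client.getMaxSlots());
        } finally {
            if (client != null) {
                client.shutdown();
            }
        }
    }
}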
Use of org.apache.flink.client.program.ClusterClient in project flink by apache.
From class CliFrontendAddressConfigurationTest, method testValidConfig:
@Test
public void testValidConfig() {
    try {
        CliFrontend frontend = new CliFrontend(CliFrontendTestUtils.getConfigDir());
        RunOptions options = CliFrontendParser.parseRunCommand(new String[] {});
        ClusterClient clusterClient = frontend.retrieveClient(options);
        checkJobManagerAddress(
            clusterClient.getFlinkConfiguration(),
            CliFrontendTestUtils.TEST_JOB_MANAGER_ADDRESS,
            CliFrontendTestUtils.TEST_JOB_MANAGER_PORT);
    } catch (Exception e) {
        e.printStackTrace();
        fail(e.getMessage());
    }
}
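The checkJobManagerAddress helper is not part of the snippet; a plausible reconstruction is sketched below, under the assumption that the test constants correspond to the JobManager address and port keys used elsewhere on this page.

import static org.junit.Assert.assertEquals;

import org.apache.flink.configuration.ConfigConstants;
import org.apache.flink.configuration.Configuration;

// Hypothetical sketch of the assertion helper used in testValidConfig.
public static void checkJobManagerAddress(Configuration config, String expectedAddress, int expectedPort) {
    assertEquals(expectedAddress, config.getString(ConfigConstants.JOB_MANAGER_IPC_ADDRESS_KEY, null));
    assertEquals(expectedPort, config.getInteger(ConfigConstants.JOB_MANAGER_IPC_PORT_KEY, -1));
}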
Use of org.apache.flink.client.program.ClusterClient in project flink by apache.
From class FlinkClient, method killTopologyWithOpts:
public void killTopologyWithOpts(final String name, final KillOptions options) throws NotAliveException {
    final JobID jobId = this.getTopologyJobId(name);
    if (jobId == null) {
        throw new NotAliveException("Storm topology with name " + name + " not found.");
    }

    if (options != null) {
        try {
            Thread.sleep(1000 * options.get_wait_secs());
        } catch (final InterruptedException e) {
            throw new RuntimeException(e);
        }
    }

    final Configuration configuration = GlobalConfiguration.loadConfiguration();
    configuration.setString(ConfigConstants.JOB_MANAGER_IPC_ADDRESS_KEY, this.jobManagerHost);
    configuration.setInteger(ConfigConstants.JOB_MANAGER_IPC_PORT_KEY, this.jobManagerPort);

    final ClusterClient client;
    try {
        client = new StandaloneClusterClient(configuration);
    } catch (final IOException e) {
        throw new RuntimeException("Could not establish a connection to the job manager", e);
    }

    try {
        client.stop(jobId);
    } catch (final Exception e) {
        throw new RuntimeException("Cannot stop job.", e);
    }
}
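Stripped of the Storm compatibility layer, the core of the method is the configure-then-stop pattern. A standalone sketch of that pattern follows; the host, the port, and the job id read from the command line are all placeholder assumptions.

import org.apache.flink.api.common.JobID;
import org.apache.flink.client.program.ClusterClient;
import org.apache.flink.client.program.StandaloneClusterClient;
import org.apache.flink.configuration.ConfigConstants;
import org.apache.flink.configuration.Configuration;
import org.apache.flink.configuration.GlobalConfiguration;

public class StopJobSketch {

    public static void main(String[] args) throws Exception {
        // Same configuration pattern as killTopologyWithOpts above,
        // with placeholder coordinates instead of the client's fields.
        final Configuration configuration = GlobalConfiguration.loadConfiguration();
        configuration.setString(ConfigConstants.JOB_MANAGER_IPC_ADDRESS_KEY, "localhost");
        configuration.setInteger(ConfigConstants.JOB_MANAGER_IPC_PORT_KEY, 6123);

        final ClusterClient client = new StandaloneClusterClient(configuration);
        try {
            // stop() gracefully stops a running streaming job; it fails for
            // jobs whose sources do not support stopping.
            client.stop(JobID.fromHexString(args[0]));
        } finally {
            client.shutdown();
        }
    }
}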
Use of org.apache.flink.client.program.ClusterClient in project flink by apache.
From class JobRetrievalITCase, method testJobRetrieval:
@Test
public void testJobRetrieval() throws Exception {
    final JobID jobID = new JobID();

    final JobVertex imalock = new JobVertex("imalock");
    imalock.setInvokableClass(SemaphoreInvokable.class);

    final JobGraph jobGraph = new JobGraph(jobID, "testjob", imalock);

    final ClusterClient client = new StandaloneClusterClient(cluster.configuration());

    // acquire the lock to make sure that the job cannot complete until the job client
    // has been attached in resumingThread
    lock.acquire();
    client.runDetached(jobGraph, JobRetrievalITCase.class.getClassLoader());

    final Thread resumingThread = new Thread(new Runnable() {
        @Override
        public void run() {
            try {
                assertNotNull(client.retrieveJob(jobID));
            } catch (Throwable e) {
                fail(e.getMessage());
            }
        }
    });

    final Seq<ActorSystem> actorSystemSeq = cluster.jobManagerActorSystems().get();
    final ActorSystem actorSystem = actorSystemSeq.last();
    JavaTestKit testkit = new JavaTestKit(actorSystem);

    final ActorRef jm = cluster.getJobManagersAsJava().get(0);
    // wait until client connects
    jm.tell(TestingJobManagerMessages.getNotifyWhenClientConnects(), testkit.getRef());
    // confirm registration
    testkit.expectMsgEquals(true);

    // kick off resuming
    resumingThread.start();

    // wait for client to connect
    testkit.expectMsgAllOf(
        TestingJobManagerMessages.getClientConnected(),
        TestingJobManagerMessages.getClassLoadingPropsDelivered());

    // client has connected, we can release the lock
    lock.release();

    resumingThread.join();
}
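Without the Akka test harness, the pattern this test exercises reduces to: submit detached, keep only the JobID, and re-attach later with retrieveJob. A sketch under the assumption that a configuration and a ready JobGraph are available:

import org.apache.flink.client.program.ClusterClient;
import org.apache.flink.client.program.StandaloneClusterClient;
import org.apache.flink.configuration.Configuration;
import org.apache.flink.runtime.jobgraph.JobGraph;

public class DetachAndResumeSketch {

    // Submit a job without waiting for it, then re-attach by JobID.
    static void detachAndResume(Configuration config, JobGraph jobGraph) throws Exception {
        final ClusterClient client = new StandaloneClusterClient(config);
        try {
            // Fire-and-forget submission; the call returns immediately.
            client.runDetached(jobGraph, DetachAndResumeSketch.class.getClassLoader());

            // Later, possibly from a different client, re-attach and wait
            // for the job result using only the JobID.
            client.retrieveJob(jobGraph.getJobID());
        } finally {
            client.shutdown();
        }
    }
}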
Use of org.apache.flink.client.program.ClusterClient in project flink by apache.
From class AbstractQueryableStateTestBase, method testDuplicateRegistrationFailsJob:
/**
* Tests that duplicate query registrations fail the job at the JobManager.
*/
@Test(timeout = 60_000)
public void testDuplicateRegistrationFailsJob() throws Exception {
    final int numKeys = 256;

    StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
    env.setStateBackend(stateBackend);
    env.setParallelism(maxParallelism);
    // Very important, because cluster is shared between tests and we
    // don't explicitly check that all slots are available before
    // submitting.
    env.setRestartStrategy(RestartStrategies.fixedDelayRestart(Integer.MAX_VALUE, 1000L));

    DataStream<Tuple2<Integer, Long>> source = env.addSource(new TestKeyRangeSource(numKeys));

    // Reducing state
    ReducingStateDescriptor<Tuple2<Integer, Long>> reducingState =
        new ReducingStateDescriptor<>("any-name", new SumReduce(), source.getType());

    final String queryName = "duplicate-me";

    final QueryableStateStream<Integer, Tuple2<Integer, Long>> queryableState =
        source.keyBy(new KeySelector<Tuple2<Integer, Long>, Integer>() {
            private static final long serialVersionUID = -4126824763829132959L;

            @Override
            public Integer getKey(Tuple2<Integer, Long> value) {
                return value.f0;
            }
        }).asQueryableState(queryName, reducingState);

    final QueryableStateStream<Integer, Tuple2<Integer, Long>> duplicate =
        source.keyBy(new KeySelector<Tuple2<Integer, Long>, Integer>() {
            private static final long serialVersionUID = -6265024000462809436L;

            @Override
            public Integer getKey(Tuple2<Integer, Long> value) {
                return value.f0;
            }
        }).asQueryableState(queryName);

    // Submit the job graph
    final JobGraph jobGraph = env.getStreamGraph().getJobGraph();
    clusterClient.submitJob(jobGraph)
        .thenCompose(clusterClient::requestJobResult)
        .thenApply(JobResult::getSerializedThrowable)
        .thenAccept(serializedThrowable -> {
            assertTrue(serializedThrowable.isPresent());
            final Throwable t = serializedThrowable.get().deserializeError(getClass().getClassLoader());
            final String failureCause = ExceptionUtils.stringifyException(t);
            assertThat(failureCause, containsString("KvState with name '" + queryName + "' has already been registered by another operator"));
        }).get();
}
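The fix for the failure this test provokes is simply to give each registration its own name. A sketch of the corrected registrations, reusing the key selector shape from the test; the state names here are illustrative.

import org.apache.flink.api.common.state.ReducingStateDescriptor;
import org.apache.flink.api.java.functions.KeySelector;
import org.apache.flink.api.java.tuple.Tuple2;
import org.apache.flink.streaming.api.datastream.DataStream;

public class UniqueQueryableNamesSketch {

    // Each asQueryableState call registers a KvState under its name with the
    // JobManager, so every registration needs a distinct name.
    static void registerWithUniqueNames(
            DataStream<Tuple2<Integer, Long>> source,
            ReducingStateDescriptor<Tuple2<Integer, Long>> reducingState) {

        final KeySelector<Tuple2<Integer, Long>, Integer> keySelector =
            new KeySelector<Tuple2<Integer, Long>, Integer>() {
                @Override
                public Integer getKey(Tuple2<Integer, Long> value) {
                    return value.f0;
                }
            };

        source.keyBy(keySelector).asQueryableState("sum-per-key", reducingState);
        source.keyBy(keySelector).asQueryableState("latest-value-per-key");
    }
}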