use of org.apache.flink.client.program.ClusterClient in project flink by apache.
the class JobRetrievalITCase method testNonExistingJobRetrieval.
@Test
public void testNonExistingJobRetrieval() throws Exception {
	final JobID jobID = new JobID();
	ClusterClient client = new StandaloneClusterClient(cluster.configuration());
	try {
		client.retrieveJob(jobID);
		fail();
	} catch (JobRetrievalException e) {
		// this is what we want
	}
}
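For contrast, a minimal sketch of the happy path, not taken from the test above: retrieving a job that does exist blocks until that job completes and yields its result. The runningJobId below is an assumed identifier of a previously submitted job, and the JobExecutionResult return type is an assumption about this Flink version.

// Hedged sketch, not part of JobRetrievalITCase: runningJobId is assumed to
// identify a job that was submitted earlier and is known to the JobManager.
JobExecutionResult result = client.retrieveJob(runningJobId);
System.out.println("Job finished after " + result.getNetRuntime() + " ms");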
use of org.apache.flink.client.program.ClusterClient in project flink by apache.
the class CliFrontend method retrieveClient.
/**
 * Retrieves the {@link ClusterClient} described by the given command line options.
 *
 * @param options Command line options that determine the cluster to connect to
 * @return Client to communicate with the cluster's JobManager
 */
protected ClusterClient retrieveClient(CommandLineOptions options) {
	CustomCommandLine customCLI = getActiveCustomCommandLine(options.getCommandLine());
	try {
		ClusterClient client = customCLI.retrieveCluster(options.getCommandLine(), config);
		logAndSysout("Using address " + client.getJobManagerAddress() + " to connect to JobManager.");
		return client;
	} catch (Exception e) {
		LOG.error("Couldn't retrieve {} cluster.", customCLI.getId(), e);
		throw new IllegalConfigurationException("Couldn't retrieve client for cluster", e);
	}
}
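Since retrieveClient wraps every failure in an IllegalConfigurationException, a caller can turn an unreachable cluster into a clean error message. A hedged sketch of such a caller; the handler name and the exit-code convention are assumptions, not part of CliFrontend as shown here.

// Illustrative only; runCommand and its return convention are hypothetical.
protected int runCommand(CommandLineOptions options) {
	try {
		ClusterClient client = retrieveClient(options);
		logAndSysout("Connected to JobManager at " + client.getJobManagerAddress());
		return 0;
	} catch (IllegalConfigurationException e) {
		LOG.error("Could not connect to the cluster.", e);
		return 1;
	}
}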
use of org.apache.flink.client.program.ClusterClient in project flink by apache.
the class CliFrontendAddressConfigurationTest method testManualOptionsOverridesConfig.
@Test
public void testManualOptionsOverridesConfig() {
	try {
		CliFrontend frontend = new CliFrontend(CliFrontendTestUtils.getConfigDir());
		RunOptions options = CliFrontendParser.parseRunCommand(new String[] { "-m", "203.0.113.22:7788" });
		ClusterClient client = frontend.retrieveClient(options);
		Configuration config = client.getFlinkConfiguration();
		InetSocketAddress expectedAddress = new InetSocketAddress("203.0.113.22", 7788);
		checkJobManagerAddress(config, expectedAddress.getHostName(), expectedAddress.getPort());
	} catch (Exception e) {
		e.printStackTrace();
		fail(e.getMessage());
	}
}
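The checkJobManagerAddress helper is not shown on this page. A plausible sketch of what it asserts, under the assumption that it simply compares the JobManager address keys in the configuration against the expected host and port:

// Assumed reconstruction, not the original helper from the Flink test base.
static void checkJobManagerAddress(Configuration config, String expectedHost, int expectedPort) {
	String host = config.getString(ConfigConstants.JOB_MANAGER_IPC_ADDRESS_KEY, null);
	int port = config.getInteger(ConfigConstants.JOB_MANAGER_IPC_PORT_KEY, -1);
	assertEquals(expectedHost, host);
	assertEquals(expectedPort, port);
}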
use of org.apache.flink.client.program.ClusterClient in project flink by apache.
the class AvroExternalJarProgramITCase method testExternalProgram.
@Test
public void testExternalProgram() {
	LocalFlinkMiniCluster testMiniCluster = null;
	try {
		Configuration config = new Configuration();
		config.setInteger(ConfigConstants.TASK_MANAGER_NUM_TASK_SLOTS, 4);
		testMiniCluster = new LocalFlinkMiniCluster(config, false);
		testMiniCluster.start();
		String jarFile = JAR_FILE;
		String testData = getClass().getResource(TEST_DATA_FILE).toString();
		PackagedProgram program = new PackagedProgram(new File(jarFile), new String[] { testData });
		config.setString(ConfigConstants.JOB_MANAGER_IPC_ADDRESS_KEY, "localhost");
		config.setInteger(ConfigConstants.JOB_MANAGER_IPC_PORT_KEY, testMiniCluster.getLeaderRPCPort());
		ClusterClient client = new StandaloneClusterClient(config);
		client.setPrintStatusDuringExecution(false);
		client.run(program, 4);
	} catch (Throwable t) {
		System.err.println(t.getMessage());
		t.printStackTrace();
		Assert.fail("Error during the packaged program execution: " + t.getMessage());
	} finally {
		if (testMiniCluster != null) {
			try {
				testMiniCluster.stop();
			} catch (Throwable t) {
				// ignore
			}
		}
	}
}
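The test discards the result of client.run(program, 4). A hedged sketch of a fragment that could replace that call inside the existing try block to capture the submitted job's ID; the JobSubmissionResult return type of this run overload is an assumption about the Flink version shown here.

// Sketch only, replacing the run call inside the try block above.
JobSubmissionResult result = client.run(program, 4);
System.out.println("Packaged program ran as job " + result.getJobID());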
use of org.apache.flink.client.program.ClusterClient in project flink by apache.
the class FlinkClient method submitTopologyWithOpts.
/**
 * Parameter {@code uploadedJarLocation} actually points to a local jar file, because Flink does not support
 * uploading a jar file beforehand. Jar files are always uploaded directly when a program is submitted.
 */
public void submitTopologyWithOpts(final String name, final String uploadedJarLocation, final FlinkTopology topology) throws AlreadyAliveException, InvalidTopologyException {
	// Refuse to submit if a topology with the same name is already running.
	if (this.getTopologyJobId(name) != null) {
		throw new AlreadyAliveException();
	}

	// Validate the local jar file that will be shipped with the job.
	final URI uploadedJarUri;
	final URL uploadedJarUrl;
	try {
		uploadedJarUri = new File(uploadedJarLocation).getAbsoluteFile().toURI();
		uploadedJarUrl = uploadedJarUri.toURL();
		JobWithJars.checkJarFile(uploadedJarUrl);
	} catch (final IOException e) {
		throw new RuntimeException("Problem with jar file " + uploadedJarLocation, e);
	}

	// Register the Storm configuration (including Kryo serializers) with the topology.
	try {
		FlinkClient.addStormConfigToTopology(topology, conf);
	} catch (ClassNotFoundException e) {
		LOG.error("Could not register class for Kryo serialization.", e);
		throw new InvalidTopologyException("Could not register class for Kryo serialization.");
	}

	// Translate the topology into a Flink job graph and attach the user jar.
	final StreamGraph streamGraph = topology.getExecutionEnvironment().getStreamGraph();
	streamGraph.setJobName(name);
	final JobGraph jobGraph = streamGraph.getJobGraph();
	jobGraph.addJar(new Path(uploadedJarUri));

	// Point the job configuration at the JobManager to submit to.
	final Configuration configuration = jobGraph.getJobConfiguration();
	configuration.setString(ConfigConstants.JOB_MANAGER_IPC_ADDRESS_KEY, jobManagerHost);
	configuration.setInteger(ConfigConstants.JOB_MANAGER_IPC_PORT_KEY, jobManagerPort);

	final ClusterClient client;
	try {
		client = new StandaloneClusterClient(configuration);
	} catch (final IOException e) {
		throw new RuntimeException("Could not establish a connection to the job manager", e);
	}

	// Submit the job detached, loading user code through a dedicated class loader.
	try {
		ClassLoader classLoader = JobWithJars.buildUserCodeClassLoader(Collections.<URL>singletonList(uploadedJarUrl), Collections.<URL>emptyList(), this.getClass().getClassLoader());
		client.runDetached(jobGraph, classLoader);
	} catch (final ProgramInvocationException e) {
		throw new RuntimeException("Cannot execute job due to ProgramInvocationException", e);
	}
}
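A hedged sketch of the inverse operation, under the assumption that a topology is stopped by cancelling its Flink job. The lookup reuses getTopologyJobId from the method above, and cancel(JobID) is assumed to be available on this ClusterClient version; this is not the original FlinkClient implementation.

// Sketch only, not the original FlinkClient kill/cancel logic.
public void killTopology(final String name) {
	final JobID jobId = this.getTopologyJobId(name);
	if (jobId == null) {
		throw new RuntimeException("Topology " + name + " is not running.");
	}
	final Configuration configuration = new Configuration();
	configuration.setString(ConfigConstants.JOB_MANAGER_IPC_ADDRESS_KEY, jobManagerHost);
	configuration.setInteger(ConfigConstants.JOB_MANAGER_IPC_PORT_KEY, jobManagerPort);
	try {
		final ClusterClient client = new StandaloneClusterClient(configuration);
		client.cancel(jobId);
	} catch (final Exception e) {
		throw new RuntimeException("Could not cancel topology " + name, e);
	}
}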