Use of org.apache.flink.client.program.StandaloneClusterClient in project flink by apache.
The class JobRetrievalITCase, method testNonExistingJobRetrieval.
@Test
public void testNonExistingJobRetrieval() throws Exception {
    final JobID jobID = new JobID();
    ClusterClient client = new StandaloneClusterClient(cluster.configuration());
    try {
        client.retrieveJob(jobID);
        fail();
    } catch (JobRetrievalException e) {
        // this is what we want
    }
}
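Outside of the mini-cluster test harness, the same retrieval pattern can be sketched against an explicitly configured JobManager address. The host and port below are placeholders, and the snippet is assumed to live in a method that declares throws Exception:

Configuration config = new Configuration();
config.setString(ConfigConstants.JOB_MANAGER_IPC_ADDRESS_KEY, "localhost"); // placeholder host
config.setInteger(ConfigConstants.JOB_MANAGER_IPC_PORT_KEY, 6123);          // placeholder port
ClusterClient client = new StandaloneClusterClient(config);
try {
    // retrieveJob fails with a JobRetrievalException when no job with this id is known to the JobManager
    client.retrieveJob(new JobID());
} catch (JobRetrievalException e) {
    // expected for an unknown job id
} finally {
    client.shutdown();
}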
Use of org.apache.flink.client.program.StandaloneClusterClient in project flink by apache.
The class RemoteExecutor, method start.
// ------------------------------------------------------------------------
//  Startup & Shutdown
// ------------------------------------------------------------------------

@Override
public void start() throws Exception {
    synchronized (lock) {
        if (client == null) {
            client = new StandaloneClusterClient(clientConfiguration);
            client.setPrintStatusDuringExecution(isPrintingStatusDuringExecution());
        } else {
            throw new IllegalStateException("The remote executor was already started.");
        }
    }
}
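For symmetry, a sketch of the corresponding stop path, assuming the same lock and client fields as in start() above; this illustrates the shutdown pattern rather than quoting the Flink source verbatim:

@Override
public void stop() throws Exception {
    synchronized (lock) {
        if (client != null) {
            // release the connection opened in start(); a later start() may create a fresh client
            client.shutdown();
            client = null;
        }
    }
}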
Use of org.apache.flink.client.program.StandaloneClusterClient in project flink by apache.
The class AvroExternalJarProgramITCase, method testExternalProgram.
@Test
public void testExternalProgram() {
    LocalFlinkMiniCluster testMiniCluster = null;
    try {
        Configuration config = new Configuration();
        config.setInteger(ConfigConstants.TASK_MANAGER_NUM_TASK_SLOTS, 4);
        testMiniCluster = new LocalFlinkMiniCluster(config, false);
        testMiniCluster.start();

        String jarFile = JAR_FILE;
        String testData = getClass().getResource(TEST_DATA_FILE).toString();
        PackagedProgram program = new PackagedProgram(new File(jarFile), new String[] { testData });

        config.setString(ConfigConstants.JOB_MANAGER_IPC_ADDRESS_KEY, "localhost");
        config.setInteger(ConfigConstants.JOB_MANAGER_IPC_PORT_KEY, testMiniCluster.getLeaderRPCPort());

        ClusterClient client = new StandaloneClusterClient(config);
        client.setPrintStatusDuringExecution(false);
        client.run(program, 4);
    } catch (Throwable t) {
        System.err.println(t.getMessage());
        t.printStackTrace();
        Assert.fail("Error during the packaged program execution: " + t.getMessage());
    } finally {
        if (testMiniCluster != null) {
            try {
                testMiniCluster.stop();
            } catch (Throwable t) {
                // ignore
            }
        }
    }
}
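Stripped of the mini-cluster bootstrap, the submission itself reduces to the following sketch. The jar path, program argument, host and port are placeholders, and the code is assumed to run in a method that declares throws Exception:

Configuration config = new Configuration();
config.setString(ConfigConstants.JOB_MANAGER_IPC_ADDRESS_KEY, "localhost"); // placeholder host
config.setInteger(ConfigConstants.JOB_MANAGER_IPC_PORT_KEY, 6123);          // placeholder port

// placeholder jar path and argument
PackagedProgram program = new PackagedProgram(new File("/path/to/job.jar"), new String[] { "arg0" });

ClusterClient client = new StandaloneClusterClient(config);
try {
    client.setPrintStatusDuringExecution(false);
    // blocking submission with parallelism 4; returns when the job finishes or fails
    client.run(program, 4);
} finally {
    client.shutdown();
}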
Use of org.apache.flink.client.program.StandaloneClusterClient in project flink by apache.
The class FlinkClient, method submitTopologyWithOpts.
/**
 * Parameter {@code uploadedJarLocation} is actually used to point to the local jar, because Flink does not support
 * uploading a jar file beforehand. Jar files are always uploaded directly when a program is submitted.
 */
public void submitTopologyWithOpts(final String name, final String uploadedJarLocation, final FlinkTopology topology)
        throws AlreadyAliveException, InvalidTopologyException {
    if (this.getTopologyJobId(name) != null) {
        throw new AlreadyAliveException();
    }

    final URI uploadedJarUri;
    final URL uploadedJarUrl;
    try {
        uploadedJarUri = new File(uploadedJarLocation).getAbsoluteFile().toURI();
        uploadedJarUrl = uploadedJarUri.toURL();
        JobWithJars.checkJarFile(uploadedJarUrl);
    } catch (final IOException e) {
        throw new RuntimeException("Problem with jar file " + uploadedJarLocation, e);
    }

    try {
        FlinkClient.addStormConfigToTopology(topology, conf);
    } catch (ClassNotFoundException e) {
        LOG.error("Could not register class for Kryo serialization.", e);
        throw new InvalidTopologyException("Could not register class for Kryo serialization.");
    }

    final StreamGraph streamGraph = topology.getExecutionEnvironment().getStreamGraph();
    streamGraph.setJobName(name);
    final JobGraph jobGraph = streamGraph.getJobGraph();
    jobGraph.addJar(new Path(uploadedJarUri));

    final Configuration configuration = jobGraph.getJobConfiguration();
    configuration.setString(ConfigConstants.JOB_MANAGER_IPC_ADDRESS_KEY, jobManagerHost);
    configuration.setInteger(ConfigConstants.JOB_MANAGER_IPC_PORT_KEY, jobManagerPort);

    final ClusterClient client;
    try {
        client = new StandaloneClusterClient(configuration);
    } catch (final IOException e) {
        throw new RuntimeException("Could not establish a connection to the job manager", e);
    }

    try {
        ClassLoader classLoader = JobWithJars.buildUserCodeClassLoader(
                Collections.<URL>singletonList(uploadedJarUrl),
                Collections.<URL>emptyList(),
                this.getClass().getClassLoader());
        client.runDetached(jobGraph, classLoader);
    } catch (final ProgramInvocationException e) {
        throw new RuntimeException("Cannot execute job due to ProgramInvocationException", e);
    }
}
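As a counterpart to submission, a minimal sketch of cancelling a previously submitted job, reusing the same jobManagerHost and jobManagerPort fields; this is an illustration of the cancellation pattern, not the actual kill implementation of FlinkClient:

public void cancelJob(final JobID jobId) throws Exception {
    final Configuration configuration = new Configuration();
    configuration.setString(ConfigConstants.JOB_MANAGER_IPC_ADDRESS_KEY, jobManagerHost);
    configuration.setInteger(ConfigConstants.JOB_MANAGER_IPC_PORT_KEY, jobManagerPort);

    final ClusterClient client = new StandaloneClusterClient(configuration);
    try {
        // asks the JobManager to cancel the detached job submitted above
        client.cancel(jobId);
    } finally {
        client.shutdown();
    }
}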
Use of org.apache.flink.client.program.StandaloneClusterClient in project flink by apache.
The class RemoteStreamEnvironment, method executeRemotely.
/**
 * Executes the remote job.
 *
 * @param streamGraph
 *            Stream Graph to execute
 * @param jarFiles
 *            List of jar file URLs to ship to the cluster
 * @return The result of the job execution, containing elapsed time and accumulators.
 */
protected JobExecutionResult executeRemotely(StreamGraph streamGraph, List<URL> jarFiles) throws ProgramInvocationException {
    if (LOG.isInfoEnabled()) {
        LOG.info("Running remotely at {}:{}", host, port);
    }

    ClassLoader usercodeClassLoader = JobWithJars.buildUserCodeClassLoader(jarFiles, globalClasspaths, getClass().getClassLoader());

    Configuration configuration = new Configuration();
    configuration.addAll(this.clientConfiguration);
    configuration.setString(ConfigConstants.JOB_MANAGER_IPC_ADDRESS_KEY, host);
    configuration.setInteger(ConfigConstants.JOB_MANAGER_IPC_PORT_KEY, port);

    ClusterClient client;
    try {
        client = new StandaloneClusterClient(configuration);
        client.setPrintStatusDuringExecution(getConfig().isSysoutLoggingEnabled());
    } catch (Exception e) {
        throw new ProgramInvocationException("Cannot establish connection to JobManager: " + e.getMessage(), e);
    }

    try {
        return client.run(streamGraph, jarFiles, globalClasspaths, usercodeClassLoader).getJobExecutionResult();
    } catch (ProgramInvocationException e) {
        throw e;
    } catch (Exception e) {
        String term = e.getMessage() == null ? "." : (": " + e.getMessage());
        throw new ProgramInvocationException("The program execution failed" + term, e);
    } finally {
        client.shutdown();
    }
}
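From the caller's perspective, executeRemotely is reached through a RemoteStreamEnvironment; a minimal usage sketch, with placeholder host, port and jar path, inside a main method that declares throws Exception:

// placeholders: point host, port and the jar path at the actual cluster and user-code jar
StreamExecutionEnvironment env =
        StreamExecutionEnvironment.createRemoteEnvironment("localhost", 6123, "/path/to/user-code.jar");

env.fromElements(1, 2, 3).print();

// builds the StreamGraph and hands it to executeRemotely() together with the configured jar files
env.execute("remote example");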