Use of org.apache.flink.client.program.MiniClusterClient in project flink by apache.
From the class SchedulingITCase, method executeSchedulingTest:
private void executeSchedulingTest(Configuration configuration) throws Exception {
    configuration.setString(RestOptions.BIND_PORT, "0");
    final long slotIdleTimeout = 50L;
    configuration.setLong(JobManagerOptions.SLOT_IDLE_TIMEOUT, slotIdleTimeout);
    configuration.set(TaskManagerOptions.TOTAL_FLINK_MEMORY, MemorySize.parse("1g"));
    final int parallelism = 4;
    final MiniClusterConfiguration miniClusterConfiguration =
            new MiniClusterConfiguration.Builder()
                    .setConfiguration(configuration)
                    .setNumTaskManagers(parallelism)
                    .setNumSlotsPerTaskManager(1)
                    .build();
    try (MiniCluster miniCluster = new MiniCluster(miniClusterConfiguration)) {
        miniCluster.start();
        MiniClusterClient miniClusterClient = new MiniClusterClient(configuration, miniCluster);
        JobGraph jobGraph = createJobGraph(slotIdleTimeout << 1, parallelism);
        // wait for the submission to succeed
        JobID jobID = miniClusterClient.submitJob(jobGraph).get();
        CompletableFuture<JobResult> resultFuture = miniClusterClient.requestJobResult(jobID);
        JobResult jobResult = resultFuture.get();
        assertThat(jobResult.getSerializedThrowable().isPresent(), is(false));
    }
}
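The snippet above relies on a createJobGraph helper that is not shown here. Below is a hypothetical sketch of what such a helper could look like, assuming org.apache.flink.runtime.jobgraph.JobGraphBuilder, JobVertex, and the NoOpInvokable test task from org.apache.flink.runtime.testtasks; the actual SchedulingITCase helper differs and presumably keeps tasks running long enough for the slot idle timeout to matter.

// Hypothetical sketch, not the actual SchedulingITCase#createJobGraph.
private static JobGraph createJobGraph(long delayMillis, int parallelism) {
    final JobVertex vertex = new JobVertex("noop-task");
    // NoOpInvokable finishes immediately; the real helper presumably runs a task
    // for at least delayMillis so that slots can actually become idle.
    vertex.setInvokableClass(NoOpInvokable.class);
    vertex.setParallelism(parallelism);
    return JobGraphBuilder.newStreamingJobGraphBuilder()
            .setJobName("scheduling-test-job")
            .addJobVertices(Collections.singletonList(vertex))
            .build();
}

Whatever the helper builds, MiniClusterClient#submitJob accepts the resulting JobGraph and completes its future with the JobID once the submission is acknowledged, which is what the test waits on before requesting the job result.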
Use of org.apache.flink.client.program.MiniClusterClient in project flink by apache.
From the class ClassLoaderITCase, method testDisposeSavepointWithCustomKvState:
/**
* Tests disposal of a savepoint, which contains custom user code KvState.
*/
@Test
public void testDisposeSavepointWithCustomKvState() throws Exception {
    ClusterClient<?> clusterClient =
            new MiniClusterClient(new Configuration(), miniClusterResource.getMiniCluster());
    Deadline deadline = new FiniteDuration(100, TimeUnit.SECONDS).fromNow();
    File checkpointDir = FOLDER.newFolder();
    File outputDir = FOLDER.newFolder();
    final PackagedProgram program =
            PackagedProgram.newBuilder()
                    .setJarFile(new File(CUSTOM_KV_STATE_JAR_PATH))
                    .setArguments(
                            new String[] {
                                String.valueOf(parallelism),
                                checkpointDir.toURI().toString(),
                                "5000",
                                outputDir.toURI().toString(),
                                // Disable unaligned checkpoints as this test is
                                // triggering concurrent savepoints/checkpoints
                                "false"
                            })
                    .build();
    TestStreamEnvironment.setAsContext(
            miniClusterResource.getMiniCluster(),
            parallelism,
            Collections.singleton(new Path(CUSTOM_KV_STATE_JAR_PATH)),
            Collections.emptyList());
    // Execute detached
    Thread invokeThread =
            new Thread(
                    () -> {
                        try {
                            program.invokeInteractiveModeForExecution();
                        } catch (ProgramInvocationException ex) {
                            if (ex.getCause() == null
                                    || !(ex.getCause() instanceof JobCancellationException)) {
                                ex.printStackTrace();
                            }
                        }
                    });
    LOG.info("Starting program invoke thread");
    invokeThread.start();
    // The job ID
    JobID jobId = null;
    LOG.info("Waiting for job status running.");
    // Wait for running job
    while (jobId == null && deadline.hasTimeLeft()) {
        Collection<JobStatusMessage> jobs =
                clusterClient
                        .listJobs()
                        .get(deadline.timeLeft().toMillis(), TimeUnit.MILLISECONDS);
        for (JobStatusMessage job : jobs) {
            if (job.getJobState() == JobStatus.RUNNING) {
                jobId = job.getJobId();
                LOG.info("Job running. ID: " + jobId);
                break;
            }
        }
        // Retry if job is not available yet
        if (jobId == null) {
            Thread.sleep(100L);
        }
    }
    // Trigger savepoint
    String savepointPath = null;
    for (int i = 0; i < 20; i++) {
        LOG.info("Triggering savepoint (" + (i + 1) + "/20).");
        try {
            savepointPath =
                    clusterClient
                            .triggerSavepoint(jobId, null, SavepointFormatType.CANONICAL)
                            .get(deadline.timeLeft().toMillis(), TimeUnit.MILLISECONDS);
        } catch (Exception cause) {
            LOG.info("Failed to trigger savepoint. Retrying...", cause);
            // This can fail if the operators are not opened yet
            Thread.sleep(500);
        }
    }
    assertNotNull("Failed to trigger savepoint", savepointPath);
    clusterClient.disposeSavepoint(savepointPath).get();
    clusterClient.cancel(jobId).get();
    // make sure the execution is finished to not influence other test methods
    invokeThread.join(deadline.timeLeft().toMillis());
    assertFalse("Program invoke thread still running", invokeThread.isAlive());
}
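The test above references fields defined elsewhere in ClassLoaderITCase: miniClusterResource, FOLDER, parallelism, and CUSTOM_KV_STATE_JAR_PATH (the path to a pre-built test jar, not shown here). Below is a hypothetical sketch of how the first three could be set up with MiniClusterResource and MiniClusterResourceConfiguration from flink-test-utils; the real setup in ClassLoaderITCase differs.

// Hypothetical setup sketch; the actual ClassLoaderITCase fields differ.
private static final int parallelism = 4;

@ClassRule
public static final TemporaryFolder FOLDER = new TemporaryFolder();

@ClassRule
public static final MiniClusterResource miniClusterResource =
        new MiniClusterResource(
                new MiniClusterResourceConfiguration.Builder()
                        .setNumberTaskManagers(2)
                        .setNumberSlotsPerTaskManager(2)
                        .setConfiguration(new Configuration())
                        .build());

With a rule like this in place, miniClusterResource.getMiniCluster() returns the running MiniCluster that the MiniClusterClient constructor at the top of the test wraps.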