Use of org.apache.flink.runtime.messages.JobManagerMessages.DisposeSavepoint in project flink by apache.
The class CliFrontend, method disposeSavepoint.
/**
 * Sends a {@link org.apache.flink.runtime.messages.JobManagerMessages.DisposeSavepoint}
 * message to the job manager.
 */
private int disposeSavepoint(SavepointOptions options) {
    try {
        String savepointPath = options.getSavepointPath();
        if (savepointPath == null) {
            throw new IllegalArgumentException("Missing required argument: savepoint path. " +
                    "Usage: bin/flink savepoint -d <savepoint-path>");
        }

        String jarFile = options.getJarFilePath();

        ActorGateway jobManager = getJobManagerGateway(options);
        List<BlobKey> blobKeys = null;

        if (jarFile != null) {
            logAndSysout("Disposing savepoint '" + savepointPath + "' with JAR " + jarFile + ".");

            List<File> libs = null;
            try {
                libs = PackagedProgram.extractContainedLibraries(new File(jarFile).toURI().toURL());

                if (!libs.isEmpty()) {
                    List<Path> libPaths = new ArrayList<>(libs.size());
                    for (File f : libs) {
                        libPaths.add(new Path(f.toURI()));
                    }

                    logAndSysout("Uploading JAR files.");
                    LOG.debug("JAR files: " + libPaths);
                    blobKeys = BlobClient.uploadJarFiles(jobManager, clientTimeout, config, libPaths);
                    LOG.debug("Blob keys: " + blobKeys.toString());
                }
            } finally {
                if (libs != null) {
                    PackagedProgram.deleteExtractedLibraries(libs);
                }
            }
        } else {
            logAndSysout("Disposing savepoint '" + savepointPath + "'.");
        }

        Object msg = new DisposeSavepoint(savepointPath);
        Future<Object> response = jobManager.ask(msg, clientTimeout);

        Object result;
        try {
            logAndSysout("Waiting for response...");
            result = Await.result(response, clientTimeout);
        } catch (Exception e) {
            throw new Exception("Disposing the savepoint with path '" + savepointPath + "' failed.", e);
        }

        if (result.getClass() == JobManagerMessages.getDisposeSavepointSuccess().getClass()) {
            logAndSysout("Savepoint '" + savepointPath + "' disposed.");
            return 0;
        } else if (result instanceof DisposeSavepointFailure) {
            DisposeSavepointFailure failure = (DisposeSavepointFailure) result;

            if (failure.cause() instanceof ClassNotFoundException) {
                throw new ClassNotFoundException("Savepoint disposal failed, because of a " +
                        "missing class. This is most likely caused by a custom state " +
                        "instance, which cannot be disposed without the user code class " +
                        "loader. Please provide the program jar with which you have created " +
                        "the savepoint via -j <JAR> for disposal.",
                        failure.cause().getCause());
            } else {
                throw failure.cause();
            }
        } else {
            throw new IllegalStateException("Unknown JobManager response of type " + result.getClass());
        }
    } catch (Throwable t) {
        return handleError(t);
    }
}
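The same request/response pattern can be reused outside of the CLI. The following is a minimal sketch, assuming an already-resolved JobManager ActorGateway and a caller-supplied timeout; the SavepointDisposal class and its dispose method are hypothetical names, while the message classes and accessors are the ones the method above uses.

import org.apache.flink.runtime.instance.ActorGateway;
import org.apache.flink.runtime.messages.JobManagerMessages;
import org.apache.flink.runtime.messages.JobManagerMessages.DisposeSavepoint;
import org.apache.flink.runtime.messages.JobManagerMessages.DisposeSavepointFailure;

import scala.concurrent.Await;
import scala.concurrent.Future;
import scala.concurrent.duration.FiniteDuration;

public final class SavepointDisposal {

    /**
     * Sends a DisposeSavepoint message and blocks until the JobManager replies.
     * (Hypothetical helper; the gateway and timeout are assumed to be provided
     * by the caller.)
     */
    public static void dispose(ActorGateway jobManager, String savepointPath, FiniteDuration timeout) throws Exception {
        Future<Object> response = jobManager.ask(new DisposeSavepoint(savepointPath), timeout);
        Object result = Await.result(response, timeout);

        if (result.getClass() == JobManagerMessages.getDisposeSavepointSuccess().getClass()) {
            return; // savepoint disposed
        } else if (result instanceof DisposeSavepointFailure) {
            // Surface the JobManager-side cause to the caller
            throw new Exception("Disposing savepoint '" + savepointPath + "' failed.",
                    ((DisposeSavepointFailure) result).cause());
        } else {
            throw new IllegalStateException("Unknown JobManager response of type " + result.getClass());
        }
    }
}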
Use of org.apache.flink.runtime.messages.JobManagerMessages.DisposeSavepoint in project flink by apache.
The class CliFrontendSavepointTest, method testDisposeSavepointSuccess.
// ------------------------------------------------------------------------
// Dispose savepoint
// ------------------------------------------------------------------------
@Test
public void testDisposeSavepointSuccess() throws Exception {
    replaceStdOutAndStdErr();

    try {
        String savepointPath = "expectedSavepointPath";

        ActorGateway jobManager = mock(ActorGateway.class);

        Promise<Object> triggerResponse = new scala.concurrent.impl.Promise.DefaultPromise<>();

        when(jobManager.ask(
                Mockito.eq(new DisposeSavepoint(savepointPath)),
                any(FiniteDuration.class)))
                .thenReturn(triggerResponse.future());

        triggerResponse.success(getDisposeSavepointSuccess());

        CliFrontend frontend = new MockCliFrontend(CliFrontendTestUtils.getConfigDir(), jobManager);

        String[] parameters = { "-d", savepointPath };
        int returnCode = frontend.savepoint(parameters);

        assertEquals(0, returnCode);
        verify(jobManager, times(1)).ask(
                Mockito.eq(new DisposeSavepoint(savepointPath)),
                any(FiniteDuration.class));

        String outMsg = buffer.toString();
        assertTrue(outMsg.contains(savepointPath));
        assertTrue(outMsg.contains("disposed"));
    } finally {
        restoreStdOutAndStdErr();
    }
}
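The stubbing pattern shared by the savepoint tests in this class can be factored into a small helper: complete a Scala Promise up front and hand its already-resolved future to Mockito, so the frontend's blocking Await.result returns immediately. A hedged sketch follows; MockGateways and respondingWith are hypothetical names.

import static org.mockito.Mockito.any;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;

import org.apache.flink.runtime.instance.ActorGateway;
import org.mockito.Mockito;
import scala.concurrent.Promise;
import scala.concurrent.duration.FiniteDuration;

final class MockGateways {

    /** Returns an ActorGateway mock that answers {@code expectedMessage} with {@code reply}. */
    static ActorGateway respondingWith(Object expectedMessage, Object reply) {
        // Complete the promise up front; the future handed to Mockito is already resolved.
        Promise<Object> promise = new scala.concurrent.impl.Promise.DefaultPromise<>();
        promise.success(reply);

        ActorGateway jobManager = mock(ActorGateway.class);
        when(jobManager.ask(Mockito.eq(expectedMessage), any(FiniteDuration.class)))
                .thenReturn(promise.future());
        return jobManager;
    }
}

In the success test above, for instance, this helper would be invoked as respondingWith(new DisposeSavepoint(savepointPath), getDisposeSavepointSuccess()).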
Use of org.apache.flink.runtime.messages.JobManagerMessages.DisposeSavepoint in project flink by apache.
The class CliFrontendSavepointTest, method testDisposeSavepointFailure.
@Test
public void testDisposeSavepointFailure() throws Exception {
    replaceStdOutAndStdErr();

    try {
        String savepointPath = "expectedSavepointPath";

        ActorGateway jobManager = mock(ActorGateway.class);

        Promise<Object> triggerResponse = new scala.concurrent.impl.Promise.DefaultPromise<>();

        when(jobManager.ask(
                Mockito.eq(new DisposeSavepoint(savepointPath)),
                any(FiniteDuration.class)))
                .thenReturn(triggerResponse.future());

        Exception testException = new Exception("expectedTestException");
        triggerResponse.success(new DisposeSavepointFailure(testException));

        CliFrontend frontend = new MockCliFrontend(CliFrontendTestUtils.getConfigDir(), jobManager);

        String[] parameters = { "-d", savepointPath };
        int returnCode = frontend.savepoint(parameters);

        assertTrue(returnCode != 0);
        verify(jobManager, times(1)).ask(
                Mockito.eq(new DisposeSavepoint(savepointPath)),
                any(FiniteDuration.class));

        assertTrue(buffer.toString().contains("expectedTestException"));
    } finally {
        restoreStdOutAndStdErr();
    }
}
Use of org.apache.flink.runtime.messages.JobManagerMessages.DisposeSavepoint in project flink by apache.
The class CliFrontendSavepointTest, method testDisposeSavepointFailureUnknownResponse.
@Test
public void testDisposeSavepointFailureUnknownResponse() throws Exception {
    replaceStdOutAndStdErr();

    try {
        String savepointPath = "expectedSavepointPath";

        ActorGateway jobManager = mock(ActorGateway.class);

        Promise<Object> triggerResponse = new scala.concurrent.impl.Promise.DefaultPromise<>();

        when(jobManager.ask(
                Mockito.eq(new DisposeSavepoint(savepointPath)),
                any(FiniteDuration.class)))
                .thenReturn(triggerResponse.future());

        triggerResponse.success("UNKNOWN RESPONSE");

        CliFrontend frontend = new MockCliFrontend(CliFrontendTestUtils.getConfigDir(), jobManager);

        String[] parameters = { "-d", savepointPath };
        int returnCode = frontend.savepoint(parameters);

        assertTrue(returnCode != 0);
        verify(jobManager, times(1)).ask(
                Mockito.eq(new DisposeSavepoint(savepointPath)),
                any(FiniteDuration.class));

        String errMsg = buffer.toString();
        assertTrue(errMsg.contains("IllegalStateException"));
        assertTrue(errMsg.contains("Unknown JobManager response"));
    } finally {
        restoreStdOutAndStdErr();
    }
}
Use of org.apache.flink.runtime.messages.JobManagerMessages.DisposeSavepoint in project flink by apache.
The class SavepointITCase, method testTriggerSavepointAndResumeWithFileBasedCheckpoints.
/**
 * Triggers a savepoint for a job that uses the FsStateBackend. We expect
 * that all checkpoint files are written to a new savepoint directory.
 *
 * <ol>
 * <li>Submit job, wait for some progress</li>
 * <li>Trigger savepoint and verify that savepoint has been created</li>
 * <li>Shut down the cluster, re-submit the job from the savepoint,
 * verify that the initial state has been reset, and
 * all tasks are running again</li>
 * <li>Cancel job, dispose the savepoint, and verify that everything
 * has been cleaned up</li>
 * </ol>
 */
@Test
public void testTriggerSavepointAndResumeWithFileBasedCheckpoints() throws Exception {
    // Config
    final int numTaskManagers = 2;
    final int numSlotsPerTaskManager = 2;
    final int parallelism = numTaskManagers * numSlotsPerTaskManager;
    final Deadline deadline = new FiniteDuration(5, TimeUnit.MINUTES).fromNow();
    final File testRoot = folder.newFolder();

    TestingCluster flink = null;

    try {
        // Create a test actor system
        ActorSystem testActorSystem = AkkaUtils.createDefaultActorSystem();

        // Flink configuration
        final Configuration config = new Configuration();
        config.setInteger(ConfigConstants.LOCAL_NUMBER_TASK_MANAGER, numTaskManagers);
        config.setInteger(ConfigConstants.TASK_MANAGER_NUM_TASK_SLOTS, numSlotsPerTaskManager);

        final File checkpointDir = new File(testRoot, "checkpoints");
        final File savepointRootDir = new File(testRoot, "savepoints");

        if (!checkpointDir.mkdir() || !savepointRootDir.mkdirs()) {
            fail("Test setup failed: failed to create temporary directories.");
        }

        // Use file based checkpoints
        config.setString(CoreOptions.STATE_BACKEND, "filesystem");
        config.setString(FsStateBackendFactory.CHECKPOINT_DIRECTORY_URI_CONF_KEY, checkpointDir.toURI().toString());
        config.setString(FsStateBackendFactory.MEMORY_THRESHOLD_CONF_KEY, "0");
        config.setString(ConfigConstants.SAVEPOINT_DIRECTORY_KEY, savepointRootDir.toURI().toString());

        // Start Flink
        flink = new TestingCluster(config);
        flink.start(true);

        // Submit the job
        final JobGraph jobGraph = createJobGraph(parallelism, 0, 1000);
        final JobID jobId = jobGraph.getJobID();

        // Reset the static test job helpers
        StatefulCounter.resetForTest(parallelism);

        // Retrieve the job manager
        ActorGateway jobManager = Await.result(flink.leaderGateway().future(), deadline.timeLeft());

        LOG.info("Submitting job " + jobGraph.getJobID() + " in detached mode.");
        flink.submitJobDetached(jobGraph);

        LOG.info("Waiting for some progress.");

        // wait for the JobManager to be ready
        Future<Object> allRunning = jobManager.ask(new WaitForAllVerticesToBeRunning(jobId), deadline.timeLeft());
        Await.ready(allRunning, deadline.timeLeft());

        // wait for the Tasks to be ready
        StatefulCounter.getProgressLatch().await(deadline.timeLeft().toMillis(), TimeUnit.MILLISECONDS);

        LOG.info("Triggering a savepoint.");
        Future<Object> savepointPathFuture = jobManager.ask(new TriggerSavepoint(jobId, Option.<String>empty()), deadline.timeLeft());
        final String savepointPath = ((TriggerSavepointSuccess) Await.result(savepointPathFuture, deadline.timeLeft())).savepointPath();
        LOG.info("Retrieved savepoint path: " + savepointPath + ".");

        // Retrieve the savepoint from the testing job manager
        LOG.info("Requesting the savepoint.");
        Future<Object> savepointFuture = jobManager.ask(new RequestSavepoint(savepointPath), deadline.timeLeft());

        SavepointV1 savepoint = (SavepointV1) ((ResponseSavepoint) Await.result(savepointFuture, deadline.timeLeft())).savepoint();
        LOG.info("Retrieved savepoint: " + savepointPath + ".");

        // Shut down the Flink cluster (thereby canceling the job)
        LOG.info("Shutting down Flink cluster.");
        flink.shutdown();
        flink.awaitTermination();

        // - Verification START -------------------------------------------

        // Only one savepoint should exist
        File[] files = savepointRootDir.listFiles();

        if (files != null) {
            assertEquals("Savepoint not created in expected directory", 1, files.length);
            assertTrue("Savepoint did not create self-contained directory", files[0].isDirectory());

            File savepointDir = files[0];
            File[] savepointFiles = savepointDir.listFiles();
            assertNotNull(savepointFiles);

            // Expect one metadata file and one checkpoint file per stateful
            // parallel subtask
            String errMsg = "Did not write expected number of savepoint/checkpoint files to directory: "
                    + Arrays.toString(savepointFiles);
            assertEquals(errMsg, 1 + parallelism, savepointFiles.length);
        } else {
            fail("Savepoint not created in expected directory");
        }

        // We currently have the following directory layout: checkpointDir/jobId/chk-ID
        File jobCheckpoints = new File(checkpointDir, jobId.toString());

        if (jobCheckpoints.exists()) {
            files = jobCheckpoints.listFiles();
            assertNotNull("Checkpoint directory empty", files);
            assertEquals("Checkpoints directory not clean: " + Arrays.toString(files), 0, files.length);
        }

        // - Verification END ---------------------------------------------

        // Restart the cluster
        LOG.info("Restarting Flink cluster.");
        flink.start();

        // Retrieve the job manager
        LOG.info("Retrieving JobManager.");
        jobManager = Await.result(flink.leaderGateway().future(), deadline.timeLeft());
        LOG.info("JobManager: " + jobManager + ".");

        // Reset static test helpers
        StatefulCounter.resetForTest(parallelism);

        // Gather all task deployment descriptors
        final Throwable[] error = new Throwable[1];
        final TestingCluster finalFlink = flink;
        final Multimap<JobVertexID, TaskDeploymentDescriptor> tdds = HashMultimap.create();

        new JavaTestKit(testActorSystem) {
            {
                new Within(deadline.timeLeft()) {
                    @Override
                    protected void run() {
                        try {
                            // Register to all submit task messages for job
                            for (ActorRef taskManager : finalFlink.getTaskManagersAsJava()) {
                                taskManager.tell(new TestingTaskManagerMessages.RegisterSubmitTaskListener(jobId), getTestActor());
                            }

                            // Set the savepoint path
                            jobGraph.setSavepointRestoreSettings(SavepointRestoreSettings.forPath(savepointPath));

                            LOG.info("Resubmitting job " + jobGraph.getJobID() + " with " +
                                    "savepoint path " + savepointPath + " in detached mode.");

                            // Submit the job
                            finalFlink.submitJobDetached(jobGraph);

                            int numTasks = 0;
                            for (JobVertex jobVertex : jobGraph.getVertices()) {
                                numTasks += jobVertex.getParallelism();
                            }

                            // Gather the task deployment descriptors
                            LOG.info("Gathering " + numTasks + " submitted " +
                                    "TaskDeploymentDescriptor instances.");

                            for (int i = 0; i < numTasks; i++) {
                                ResponseSubmitTaskListener resp = (ResponseSubmitTaskListener) expectMsgAnyClassOf(
                                        getRemainingTime(), ResponseSubmitTaskListener.class);

                                TaskDeploymentDescriptor tdd = resp.tdd();
                                LOG.info("Received: " + tdd.toString() + ".");

                                TaskInformation taskInformation = tdd
                                        .getSerializedTaskInformation()
                                        .deserializeValue(getClass().getClassLoader());

                                tdds.put(taskInformation.getJobVertexId(), tdd);
                            }
                        } catch (Throwable t) {
                            error[0] = t;
                        }
                    }
                };
            }
        };

        // - Verification START -------------------------------------------

        String errMsg = "Error during gathering of TaskDeploymentDescriptors";
        assertNull(errMsg, error[0]);

        // Verify that all tasks, which are part of the savepoint,
        // have a matching task deployment descriptor.
        for (TaskState taskState : savepoint.getTaskStates()) {
            Collection<TaskDeploymentDescriptor> taskTdds = tdds.get(taskState.getJobVertexID());

            errMsg = "Missing task for savepoint state for operator " + taskState.getJobVertexID() + ".";
            assertTrue(errMsg, taskTdds.size() > 0);

            assertEquals(taskState.getNumberCollectedStates(), taskTdds.size());

            for (TaskDeploymentDescriptor tdd : taskTdds) {
                SubtaskState subtaskState = taskState.getState(tdd.getSubtaskIndex());

                assertNotNull(subtaskState);

                errMsg = "Initial operator state mismatch.";
                assertEquals(errMsg, subtaskState.getLegacyOperatorState(),
                        tdd.getTaskStateHandles().getLegacyOperatorState());
            }
        }

        // Await state is restored
        StatefulCounter.getRestoreLatch().await(deadline.timeLeft().toMillis(), TimeUnit.MILLISECONDS);

        // Await some progress after restore
        StatefulCounter.getProgressLatch().await(deadline.timeLeft().toMillis(), TimeUnit.MILLISECONDS);

        // - Verification END ---------------------------------------------

        LOG.info("Cancelling job " + jobId + ".");
        jobManager.tell(new CancelJob(jobId));

        LOG.info("Disposing savepoint " + savepointPath + ".");
        Future<Object> disposeFuture = jobManager.ask(new DisposeSavepoint(savepointPath), deadline.timeLeft());

        errMsg = "Failed to dispose savepoint " + savepointPath + ".";
        Object resp = Await.result(disposeFuture, deadline.timeLeft());
        assertTrue(errMsg, resp.getClass() == getDisposeSavepointSuccess().getClass());

        // - Verification START -------------------------------------------

        // The checkpoint files
        List<File> checkpointFiles = new ArrayList<>();

        for (TaskState stateForTaskGroup : savepoint.getTaskStates()) {
            for (SubtaskState subtaskState : stateForTaskGroup.getStates()) {
                ChainedStateHandle<StreamStateHandle> streamTaskState = subtaskState.getLegacyOperatorState();

                for (int i = 0; i < streamTaskState.getLength(); i++) {
                    if (streamTaskState.get(i) != null) {
                        FileStateHandle fileStateHandle = (FileStateHandle) streamTaskState.get(i);
                        checkpointFiles.add(new File(fileStateHandle.getFilePath().toUri()));
                    }
                }
            }
        }

        // The checkpoint files of the savepoint should have been discarded
        for (File f : checkpointFiles) {
            errMsg = "Checkpoint file " + f + " not cleaned up properly.";
            assertFalse(errMsg, f.exists());
        }

        if (checkpointFiles.size() > 0) {
            File parent = checkpointFiles.get(0).getParentFile();
            errMsg = "Checkpoint parent directory " + parent + " not cleaned up properly.";
            assertFalse(errMsg, parent.exists());
        }

        // All savepoints should have been cleaned up
        errMsg = "Savepoints directory not cleaned up properly: " +
                Arrays.toString(savepointRootDir.listFiles()) + ".";
        assertEquals(errMsg, 0, savepointRootDir.listFiles().length);

        // - Verification END ---------------------------------------------
    } finally {
        if (flink != null) {
            flink.shutdown();
        }
    }
}
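Distilled from the integration test above, the following is a minimal sketch of the trigger-then-dispose round trip against a JobManager gateway. SavepointRoundTrip and triggerAndDispose are hypothetical names; like the test, the sketch casts the trigger reply directly to TriggerSavepointSuccess, so a robust client would first check for a failure reply.

import org.apache.flink.api.common.JobID;
import org.apache.flink.runtime.instance.ActorGateway;
import org.apache.flink.runtime.messages.JobManagerMessages.DisposeSavepoint;
import org.apache.flink.runtime.messages.JobManagerMessages.TriggerSavepoint;
import org.apache.flink.runtime.messages.JobManagerMessages.TriggerSavepointSuccess;

import scala.Option;
import scala.concurrent.Await;
import scala.concurrent.Future;
import scala.concurrent.duration.FiniteDuration;

public final class SavepointRoundTrip {

    /** Triggers a savepoint for {@code jobId}, then disposes it again and returns its path. */
    public static String triggerAndDispose(ActorGateway jobManager, JobID jobId, FiniteDuration timeout) throws Exception {
        // Trigger with the default savepoint directory (no explicit target path)
        Future<Object> trigger = jobManager.ask(new TriggerSavepoint(jobId, Option.<String>empty()), timeout);
        String savepointPath = ((TriggerSavepointSuccess) Await.result(trigger, timeout)).savepointPath();

        // Dispose and wait for the acknowledgement before returning
        Future<Object> dispose = jobManager.ask(new DisposeSavepoint(savepointPath), timeout);
        Await.result(dispose, timeout);

        return savepointPath;
    }
}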