Use of scala.concurrent.duration.Deadline in project flink by apache.
The class SavepointITCase, method testTriggerSavepointAndResumeWithFileBasedCheckpoints.
/**
 * Triggers a savepoint for a job that uses the FsStateBackend. We expect
 * that all checkpoint files are written to a new savepoint directory.
 *
 * <ol>
 * <li>Submit job, wait for some progress</li>
 * <li>Trigger savepoint and verify that savepoint has been created</li>
 * <li>Shut down the cluster, re-submit the job from the savepoint,
 * verify that the initial state has been reset, and
 * all tasks are running again</li>
 * <li>Cancel job, dispose the savepoint, and verify that everything
 * has been cleaned up</li>
 * </ol>
 */
@Test
public void testTriggerSavepointAndResumeWithFileBasedCheckpoints() throws Exception {
    // Config
    final int numTaskManagers = 2;
    final int numSlotsPerTaskManager = 2;
    final int parallelism = numTaskManagers * numSlotsPerTaskManager;
    final Deadline deadline = new FiniteDuration(5, TimeUnit.MINUTES).fromNow();
    final File testRoot = folder.newFolder();
    TestingCluster flink = null;
    try {
        // Create a test actor system
        ActorSystem testActorSystem = AkkaUtils.createDefaultActorSystem();
        // Flink configuration
        final Configuration config = new Configuration();
        config.setInteger(ConfigConstants.LOCAL_NUMBER_TASK_MANAGER, numTaskManagers);
        config.setInteger(ConfigConstants.TASK_MANAGER_NUM_TASK_SLOTS, numSlotsPerTaskManager);
        final File checkpointDir = new File(testRoot, "checkpoints");
        final File savepointRootDir = new File(testRoot, "savepoints");
        if (!checkpointDir.mkdir() || !savepointRootDir.mkdirs()) {
            fail("Test setup failed: failed to create temporary directories.");
        }
        // Use file based checkpoints
        config.setString(CoreOptions.STATE_BACKEND, "filesystem");
        config.setString(FsStateBackendFactory.CHECKPOINT_DIRECTORY_URI_CONF_KEY, checkpointDir.toURI().toString());
        config.setString(FsStateBackendFactory.MEMORY_THRESHOLD_CONF_KEY, "0");
        config.setString(ConfigConstants.SAVEPOINT_DIRECTORY_KEY, savepointRootDir.toURI().toString());
        // Start Flink
        flink = new TestingCluster(config);
        flink.start(true);
        // Submit the job
        final JobGraph jobGraph = createJobGraph(parallelism, 0, 1000);
        final JobID jobId = jobGraph.getJobID();
        // Reset the static test job helpers
        StatefulCounter.resetForTest(parallelism);
        // Retrieve the job manager
        ActorGateway jobManager = Await.result(flink.leaderGateway().future(), deadline.timeLeft());
        LOG.info("Submitting job " + jobGraph.getJobID() + " in detached mode.");
        flink.submitJobDetached(jobGraph);
        LOG.info("Waiting for some progress.");
        // wait for the JobManager to be ready
        Future<Object> allRunning = jobManager.ask(new WaitForAllVerticesToBeRunning(jobId), deadline.timeLeft());
        Await.ready(allRunning, deadline.timeLeft());
        // wait for the tasks to be ready
        StatefulCounter.getProgressLatch().await(deadline.timeLeft().toMillis(), TimeUnit.MILLISECONDS);
        LOG.info("Triggering a savepoint.");
        Future<Object> savepointPathFuture = jobManager.ask(new TriggerSavepoint(jobId, Option.<String>empty()), deadline.timeLeft());
        final String savepointPath = ((TriggerSavepointSuccess) Await.result(savepointPathFuture, deadline.timeLeft())).savepointPath();
        LOG.info("Retrieved savepoint path: " + savepointPath + ".");
        // Retrieve the savepoint from the testing job manager
        LOG.info("Requesting the savepoint.");
        Future<Object> savepointFuture = jobManager.ask(new RequestSavepoint(savepointPath), deadline.timeLeft());
        SavepointV1 savepoint = (SavepointV1) ((ResponseSavepoint) Await.result(savepointFuture, deadline.timeLeft())).savepoint();
        LOG.info("Retrieved savepoint: " + savepointPath + ".");
        // Shut down the Flink cluster (thereby canceling the job)
        LOG.info("Shutting down Flink cluster.");
        flink.shutdown();
        flink.awaitTermination();
        // - Verification START -------------------------------------------
        // Only one savepoint should exist
        File[] files = savepointRootDir.listFiles();
        if (files != null) {
            assertEquals("Savepoint not created in expected directory", 1, files.length);
            assertTrue("Savepoint did not create self-contained directory", files[0].isDirectory());
            File savepointDir = files[0];
            File[] savepointFiles = savepointDir.listFiles();
            assertNotNull(savepointFiles);
            // Expect one metadata file and one checkpoint file per stateful
            // parallel subtask
            String errMsg = "Did not write expected number of savepoint/checkpoint files to directory: " + Arrays.toString(savepointFiles);
            assertEquals(errMsg, 1 + parallelism, savepointFiles.length);
        } else {
            fail("Savepoint not created in expected directory");
        }
        // We currently have the following directory layout: checkpointDir/jobId/chk-ID
        File jobCheckpoints = new File(checkpointDir, jobId.toString());
        if (jobCheckpoints.exists()) {
            files = jobCheckpoints.listFiles();
            assertNotNull("Checkpoint directory empty", files);
            assertEquals("Checkpoints directory not clean: " + Arrays.toString(files), 0, files.length);
        }
        // - Verification END ---------------------------------------------
        // Restart the cluster
        LOG.info("Restarting Flink cluster.");
        flink.start();
        // Retrieve the job manager
        LOG.info("Retrieving JobManager.");
        jobManager = Await.result(flink.leaderGateway().future(), deadline.timeLeft());
        LOG.info("JobManager: " + jobManager + ".");
        // Reset static test helpers
        StatefulCounter.resetForTest(parallelism);
        // Gather all task deployment descriptors
        final Throwable[] error = new Throwable[1];
        final TestingCluster finalFlink = flink;
        final Multimap<JobVertexID, TaskDeploymentDescriptor> tdds = HashMultimap.create();
        new JavaTestKit(testActorSystem) {
            {
                new Within(deadline.timeLeft()) {
                    @Override
                    protected void run() {
                        try {
                            // Register to all submit task messages for job
                            for (ActorRef taskManager : finalFlink.getTaskManagersAsJava()) {
                                taskManager.tell(new TestingTaskManagerMessages.RegisterSubmitTaskListener(jobId), getTestActor());
                            }
                            // Set the savepoint path
                            jobGraph.setSavepointRestoreSettings(SavepointRestoreSettings.forPath(savepointPath));
                            LOG.info("Resubmitting job " + jobGraph.getJobID() + " with savepoint path " + savepointPath + " in detached mode.");
                            // Submit the job
                            finalFlink.submitJobDetached(jobGraph);
                            int numTasks = 0;
                            for (JobVertex jobVertex : jobGraph.getVertices()) {
                                numTasks += jobVertex.getParallelism();
                            }
                            // Gather the task deployment descriptors
                            LOG.info("Gathering " + numTasks + " submitted TaskDeploymentDescriptor instances.");
                            for (int i = 0; i < numTasks; i++) {
                                ResponseSubmitTaskListener resp = (ResponseSubmitTaskListener) expectMsgAnyClassOf(getRemainingTime(), ResponseSubmitTaskListener.class);
                                TaskDeploymentDescriptor tdd = resp.tdd();
                                LOG.info("Received: " + tdd.toString() + ".");
                                TaskInformation taskInformation = tdd.getSerializedTaskInformation().deserializeValue(getClass().getClassLoader());
                                tdds.put(taskInformation.getJobVertexId(), tdd);
                            }
                        } catch (Throwable t) {
                            error[0] = t;
                        }
                    }
                };
            }
        };
        // - Verification START -------------------------------------------
        String errMsg = "Error during gathering of TaskDeploymentDescriptors";
        assertNull(errMsg, error[0]);
        // Verify that all tasks that are part of the savepoint
        // have a matching task deployment descriptor.
        for (TaskState taskState : savepoint.getTaskStates()) {
            Collection<TaskDeploymentDescriptor> taskTdds = tdds.get(taskState.getJobVertexID());
            errMsg = "Missing task for savepoint state for operator " + taskState.getJobVertexID() + ".";
            assertTrue(errMsg, taskTdds.size() > 0);
            assertEquals(taskState.getNumberCollectedStates(), taskTdds.size());
            for (TaskDeploymentDescriptor tdd : taskTdds) {
                SubtaskState subtaskState = taskState.getState(tdd.getSubtaskIndex());
                assertNotNull(subtaskState);
                errMsg = "Initial operator state mismatch.";
                assertEquals(errMsg, subtaskState.getLegacyOperatorState(), tdd.getTaskStateHandles().getLegacyOperatorState());
            }
        }
        // Await state is restored
        StatefulCounter.getRestoreLatch().await(deadline.timeLeft().toMillis(), TimeUnit.MILLISECONDS);
        // Await some progress after restore
        StatefulCounter.getProgressLatch().await(deadline.timeLeft().toMillis(), TimeUnit.MILLISECONDS);
        // - Verification END ---------------------------------------------
        LOG.info("Cancelling job " + jobId + ".");
        jobManager.tell(new CancelJob(jobId));
        LOG.info("Disposing savepoint " + savepointPath + ".");
        Future<Object> disposeFuture = jobManager.ask(new DisposeSavepoint(savepointPath), deadline.timeLeft());
        errMsg = "Failed to dispose savepoint " + savepointPath + ".";
        Object resp = Await.result(disposeFuture, deadline.timeLeft());
        assertTrue(errMsg, resp.getClass() == getDisposeSavepointSuccess().getClass());
        // - Verification START -------------------------------------------
        // Collect the checkpoint files referenced by the savepoint
        List<File> checkpointFiles = new ArrayList<>();
        for (TaskState stateForTaskGroup : savepoint.getTaskStates()) {
            for (SubtaskState subtaskState : stateForTaskGroup.getStates()) {
                ChainedStateHandle<StreamStateHandle> streamTaskState = subtaskState.getLegacyOperatorState();
                for (int i = 0; i < streamTaskState.getLength(); i++) {
                    if (streamTaskState.get(i) != null) {
                        FileStateHandle fileStateHandle = (FileStateHandle) streamTaskState.get(i);
                        checkpointFiles.add(new File(fileStateHandle.getFilePath().toUri()));
                    }
                }
            }
        }
        // The checkpoint files of the savepoint should have been discarded
        for (File f : checkpointFiles) {
            errMsg = "Checkpoint file " + f + " not cleaned up properly.";
            assertFalse(errMsg, f.exists());
        }
        if (checkpointFiles.size() > 0) {
            File parent = checkpointFiles.get(0).getParentFile();
            errMsg = "Checkpoint parent directory " + parent + " not cleaned up properly.";
            assertFalse(errMsg, parent.exists());
        }
        // All savepoints should have been cleaned up
        errMsg = "Savepoints directory not cleaned up properly: " + Arrays.toString(savepointRootDir.listFiles()) + ".";
        assertEquals(errMsg, 0, savepointRootDir.listFiles().length);
        // - Verification END ---------------------------------------------
    } finally {
        if (flink != null) {
            flink.shutdown();
        }
    }
}
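Note the pattern that runs through the whole test: a single Deadline is created once, and every blocking call (Await.result, Await.ready, the latch awaits) is bounded by deadline.timeLeft(), so the entire scenario shares one five-minute budget and a slow early step automatically shrinks the timeouts of the later ones. A minimal, self-contained sketch of that idiom follows; the doBlockingStep helper is hypothetical, standing in for any bounded blocking call:

import java.util.concurrent.TimeUnit;
import scala.concurrent.duration.Deadline;
import scala.concurrent.duration.FiniteDuration;

public class DeadlineSketch {
    public static void main(String[] args) throws InterruptedException {
        // One budget for the whole scenario: five minutes from "now".
        Deadline deadline = new FiniteDuration(5, TimeUnit.MINUTES).fromNow();
        // Each blocking step consumes from the shared budget instead of
        // getting its own fixed timeout.
        doBlockingStep(deadline.timeLeft());
        doBlockingStep(deadline.timeLeft());
        System.out.println("Time left: " + deadline.timeLeft());
    }

    // Hypothetical stand-in for Await.result(...), latch.await(...), etc.
    private static void doBlockingStep(FiniteDuration remaining) throws InterruptedException {
        Thread.sleep(Math.min(100, remaining.toMillis()));
    }
}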
Use of scala.concurrent.duration.Deadline in project flink by apache.
The class SavepointITCase, method testSubmitWithUnknownSavepointPath.
@Test
public void testSubmitWithUnknownSavepointPath() throws Exception {
    // Config
    int numTaskManagers = 1;
    int numSlotsPerTaskManager = 1;
    int parallelism = numTaskManagers * numSlotsPerTaskManager;
    // Test deadline
    final Deadline deadline = new FiniteDuration(5, TimeUnit.MINUTES).fromNow();
    final File tmpDir = CommonTestUtils.createTempDirectory();
    final File savepointDir = new File(tmpDir, "savepoints");
    TestingCluster flink = null;
    try {
        // Flink configuration
        final Configuration config = new Configuration();
        config.setInteger(ConfigConstants.LOCAL_NUMBER_TASK_MANAGER, numTaskManagers);
        config.setInteger(ConfigConstants.TASK_MANAGER_NUM_TASK_SLOTS, numSlotsPerTaskManager);
        config.setString(ConfigConstants.SAVEPOINT_DIRECTORY_KEY, savepointDir.toURI().toString());
        LOG.info("Flink configuration: " + config + ".");
        // Start Flink
        flink = new TestingCluster(config);
        LOG.info("Starting Flink cluster.");
        flink.start();
        // Retrieve the job manager
        LOG.info("Retrieving JobManager.");
        ActorGateway jobManager = Await.result(flink.leaderGateway().future(), deadline.timeLeft());
        LOG.info("JobManager: " + jobManager + ".");
        // High value to ensure timeouts if restarted.
        int numberOfRetries = 1000;
        // Submit the job
        // Long delay to ensure that the test times out if the job
        // manager tries to restart the job.
        final JobGraph jobGraph = createJobGraph(parallelism, numberOfRetries, 3600000);
        // Set non-existing savepoint path
        jobGraph.setSavepointRestoreSettings(SavepointRestoreSettings.forPath("unknown path"));
        assertEquals("unknown path", jobGraph.getSavepointRestoreSettings().getRestorePath());
        LOG.info("Submitting job " + jobGraph.getJobID() + " in detached mode.");
        try {
            flink.submitJobAndWait(jobGraph, false);
        } catch (Exception e) {
            assertEquals(JobExecutionException.class, e.getClass());
            assertEquals(FileNotFoundException.class, e.getCause().getClass());
        }
    } finally {
        if (flink != null) {
            flink.shutdown();
        }
    }
}
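One caveat worth calling out: the inner try/catch above passes silently if submitJobAndWait throws nothing, because the assertions only run inside the catch block. A stricter variant of the same check, sketched under the assumption of the identical TestingCluster and JUnit APIs used in the test, would fail loudly on a missing exception:

try {
    flink.submitJobAndWait(jobGraph, false);
    fail("Expected submission to fail for the unknown savepoint path.");
} catch (JobExecutionException e) {
    // The bogus restore path should surface as a FileNotFoundException cause.
    assertEquals(FileNotFoundException.class, e.getCause().getClass());
}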
Use of scala.concurrent.duration.Deadline in project flink by apache.
The class ClassLoaderITCase, method testDisposeSavepointWithCustomKvState.
/**
 * Tests disposal of a savepoint, which contains custom user code KvState.
 */
@Test
public void testDisposeSavepointWithCustomKvState() throws Exception {
    Deadline deadline = new FiniteDuration(100, TimeUnit.SECONDS).fromNow();
    int port = testCluster.getLeaderRPCPort();
    File checkpointDir = FOLDER.newFolder();
    File outputDir = FOLDER.newFolder();
    final PackagedProgram program = new PackagedProgram(
            new File(CUSTOM_KV_STATE_JAR_PATH),
            new String[] {
                    CUSTOM_KV_STATE_JAR_PATH,
                    "localhost",
                    String.valueOf(port),
                    String.valueOf(parallelism),
                    checkpointDir.toURI().toString(),
                    "5000",
                    outputDir.toURI().toString()
            });
    // Execute detached
    Thread invokeThread = new Thread(new Runnable() {
        @Override
        public void run() {
            try {
                program.invokeInteractiveModeForExecution();
            } catch (ProgramInvocationException ignored) {
                ignored.printStackTrace();
            }
        }
    });
    LOG.info("Starting program invoke thread");
    invokeThread.start();
    // The job ID
    JobID jobId = null;
    ActorGateway jm = testCluster.getLeaderGateway(deadline.timeLeft());
    LOG.info("Waiting for job status running.");
    // Wait for running job
    while (jobId == null && deadline.hasTimeLeft()) {
        Future<Object> jobsFuture = jm.ask(JobManagerMessages.getRequestRunningJobsStatus(), deadline.timeLeft());
        RunningJobsStatus runningJobs = (RunningJobsStatus) Await.result(jobsFuture, deadline.timeLeft());
        for (JobStatusMessage runningJob : runningJobs.getStatusMessages()) {
            jobId = runningJob.getJobId();
            LOG.info("Job running. ID: " + jobId);
            break;
        }
        // Retry if job is not available yet
        if (jobId == null) {
            Thread.sleep(100);
        }
    }
    LOG.info("Wait for all tasks to be running.");
    Future<Object> allRunning = jm.ask(new WaitForAllVerticesToBeRunning(jobId), deadline.timeLeft());
    Await.ready(allRunning, deadline.timeLeft());
    LOG.info("All tasks are running.");
    // Trigger savepoint; stop retrying once a trigger succeeds, so that
    // only the savepoint we later dispose is ever created.
    String savepointPath = null;
    for (int i = 0; i < 20 && savepointPath == null; i++) {
        LOG.info("Triggering savepoint (" + (i + 1) + "/20).");
        Future<Object> savepointFuture = jm.ask(new TriggerSavepoint(jobId, Option.<String>empty()), deadline.timeLeft());
        Object savepointResponse = Await.result(savepointFuture, deadline.timeLeft());
        if (savepointResponse.getClass() == TriggerSavepointSuccess.class) {
            savepointPath = ((TriggerSavepointSuccess) savepointResponse).savepointPath();
            LOG.info("Triggered savepoint. Path: " + savepointPath);
        } else if (savepointResponse.getClass() == JobManagerMessages.TriggerSavepointFailure.class) {
            Throwable cause = ((JobManagerMessages.TriggerSavepointFailure) savepointResponse).cause();
            LOG.info("Failed to trigger savepoint. Retrying...", cause);
            // This can fail if the operators are not opened yet
            Thread.sleep(500);
        } else {
            throw new IllegalStateException("Unexpected response to TriggerSavepoint");
        }
    }
    assertNotNull("Failed to trigger savepoint", savepointPath);
    // Upload JAR
    LOG.info("Uploading JAR " + CUSTOM_KV_STATE_JAR_PATH + " for savepoint disposal.");
    List<BlobKey> blobKeys = BlobClient.uploadJarFiles(jm, deadline.timeLeft(), testCluster.userConfiguration(), Collections.singletonList(new Path(CUSTOM_KV_STATE_JAR_PATH)));
    // Dispose savepoint
    LOG.info("Disposing savepoint at " + savepointPath);
    Future<Object> disposeFuture = jm.ask(new DisposeSavepoint(savepointPath), deadline.timeLeft());
    Object disposeResponse = Await.result(disposeFuture, deadline.timeLeft());
    if (disposeResponse.getClass() == JobManagerMessages.getDisposeSavepointSuccess().getClass()) {
        // Success :-)
        LOG.info("Disposed savepoint at " + savepointPath);
    } else if (disposeResponse instanceof DisposeSavepointFailure) {
        throw new IllegalStateException("Failed to dispose savepoint " + disposeResponse);
    } else {
        throw new IllegalStateException("Unexpected response to DisposeSavepoint");
    }
}
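The trigger loop above is the standard bounded-retry idiom: a fixed attempt count, a per-attempt ask bounded by deadline.timeLeft(), and a short sleep between failures. Stripped of the Flink messages, the idiom looks like this; attemptOnce is a hypothetical stand-in for the TriggerSavepoint ask:

// Retry until success, the attempt budget runs out, or the deadline expires.
String result = null;
for (int i = 0; i < 20 && result == null && deadline.hasTimeLeft(); i++) {
    result = attemptOnce(deadline.timeLeft()); // hypothetical; returns null on failure
    if (result == null) {
        Thread.sleep(500); // back off briefly before the next attempt
    }
}
assertNotNull("Action did not succeed within the deadline", result);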
Use of scala.concurrent.duration.Deadline in project flink by apache.
The class WebFrontendITCase, method testStop.
@Test
public void testStop() throws Exception {
    // this only works if there is no active job at this point
    assertTrue(cluster.getCurrentlyRunningJobsJava().isEmpty());
    // Create a task
    final JobVertex sender = new JobVertex("Sender");
    sender.setParallelism(2);
    sender.setInvokableClass(StoppableInvokable.class);
    final JobGraph jobGraph = new JobGraph("Stoppable streaming test job", sender);
    final JobID jid = jobGraph.getJobID();
    cluster.submitJobDetached(jobGraph);
    // wait for job to show up
    while (cluster.getCurrentlyRunningJobsJava().isEmpty()) {
        Thread.sleep(10);
    }
    final FiniteDuration testTimeout = new FiniteDuration(2, TimeUnit.MINUTES);
    final Deadline deadline = testTimeout.fromNow();
    while (!cluster.getCurrentlyRunningJobsJava().isEmpty()) {
        try (HttpTestClient client = new HttpTestClient("localhost", port)) {
            // Send the stop request to the web server
            client.sendDeleteRequest("/jobs/" + jid + "/stop", deadline.timeLeft());
            HttpTestClient.SimpleHttpResponse response = client.getNextResponse(deadline.timeLeft());
            assertEquals(HttpResponseStatus.OK, response.getStatus());
            assertEquals(MimeTypes.getMimeTypeForExtension("json"), response.getType());
            assertEquals("{}", response.getContent());
        }
        Thread.sleep(20);
    }
    // ensure we can access job details when it's finished (FLINK-4011)
    try (HttpTestClient client = new HttpTestClient("localhost", port)) {
        FiniteDuration timeout = new FiniteDuration(30, TimeUnit.SECONDS);
        client.sendGetRequest("/jobs/" + jid + "/config", timeout);
        HttpTestClient.SimpleHttpResponse response = client.getNextResponse(timeout);
        assertEquals(HttpResponseStatus.OK, response.getStatus());
        assertEquals(MimeTypes.getMimeTypeForExtension("json"), response.getType());
        assertEquals("{\"jid\":\"" + jid + "\",\"name\":\"Stoppable streaming test job\"," + "\"execution-config\":{\"execution-mode\":\"PIPELINED\",\"restart-strategy\":\"default\"," + "\"job-parallelism\":-1,\"object-reuse-mode\":false,\"user-config\":{}}}", response.getContent());
    }
}
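Both loops in this test poll cluster state; the second one is additionally bounded, because each HTTP call is given deadline.timeLeft(). A deadline-bounded variant of that polling shape, with condition() as a hypothetical predicate for whatever state is being awaited:

// Poll until the condition holds or the budget runs out; fail loudly afterwards.
Deadline deadline = new FiniteDuration(2, TimeUnit.MINUTES).fromNow();
while (!condition() && deadline.hasTimeLeft()) {
    Thread.sleep(20);
}
assertTrue("Condition not met before the deadline", condition());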
Use of scala.concurrent.duration.Deadline in project flink by apache.
The class KvStateClientTest, method testConcurrentQueries.
/**
 * Multiple threads concurrently fire queries.
 */
@Test
public void testConcurrentQueries() throws Exception {
    Deadline deadline = TEST_TIMEOUT.fromNow();
    AtomicKvStateRequestStats stats = new AtomicKvStateRequestStats();
    ExecutorService executor = null;
    KvStateClient client = null;
    Channel serverChannel = null;
    final byte[] serializedResult = new byte[1024];
    ThreadLocalRandom.current().nextBytes(serializedResult);
    try {
        int numQueryTasks = 4;
        final int numQueriesPerTask = 1024;
        executor = Executors.newFixedThreadPool(numQueryTasks);
        client = new KvStateClient(1, stats);
        serverChannel = createServerChannel(new ChannelInboundHandlerAdapter() {
            @Override
            public void channelRead(ChannelHandlerContext ctx, Object msg) throws Exception {
                ByteBuf buf = (ByteBuf) msg;
                assertEquals(KvStateRequestType.REQUEST, KvStateRequestSerializer.deserializeHeader(buf));
                KvStateRequest request = KvStateRequestSerializer.deserializeKvStateRequest(buf);
                buf.release();
                ByteBuf response = KvStateRequestSerializer.serializeKvStateRequestResult(ctx.alloc(), request.getRequestId(), serializedResult);
                ctx.channel().writeAndFlush(response);
            }
        });
        final KvStateServerAddress serverAddress = getKvStateServerAddress(serverChannel);
        final KvStateClient finalClient = client;
        Callable<List<Future<byte[]>>> queryTask = new Callable<List<Future<byte[]>>>() {
            @Override
            public List<Future<byte[]>> call() throws Exception {
                List<Future<byte[]>> results = new ArrayList<>(numQueriesPerTask);
                for (int i = 0; i < numQueriesPerTask; i++) {
                    results.add(finalClient.getKvState(serverAddress, new KvStateID(), new byte[0]));
                }
                return results;
            }
        };
        // Submit query tasks
        List<java.util.concurrent.Future<List<Future<byte[]>>>> futures = new ArrayList<>();
        for (int i = 0; i < numQueryTasks; i++) {
            futures.add(executor.submit(queryTask));
        }
        // Verify results
        for (java.util.concurrent.Future<List<Future<byte[]>>> future : futures) {
            List<Future<byte[]>> results = future.get(deadline.timeLeft().toMillis(), TimeUnit.MILLISECONDS);
            for (Future<byte[]> result : results) {
                byte[] actual = Await.result(result, deadline.timeLeft());
                assertArrayEquals(serializedResult, actual);
            }
        }
        int totalQueries = numQueryTasks * numQueriesPerTask;
        // Counts can take some time to propagate
        while (deadline.hasTimeLeft() && stats.getNumSuccessful() != totalQueries) {
            Thread.sleep(100);
        }
        assertEquals(totalQueries, stats.getNumRequests());
        assertEquals(totalQueries, stats.getNumSuccessful());
    } finally {
        if (executor != null) {
            executor.shutdown();
        }
        if (serverChannel != null) {
            serverChannel.close();
        }
        if (client != null) {
            client.shutDown();
        }
        assertEquals("Channel leak", 0, stats.getNumConnections());
    }
}
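A detail that is easy to miss here: two different Future types are in play. The executor returns java.util.concurrent.Future, awaited with get(timeout, unit), while KvStateClient returns Scala's scala.concurrent.Future, awaited with Await.result and a FiniteDuration. Both are bounded by the same deadline. A minimal sketch of the Scala side, where future stands for any scala.concurrent.Future<byte[]> such as the one getKvState returns:

// Block for at most the remaining budget; Await.result throws a
// java.util.concurrent.TimeoutException if the budget is exhausted first.
byte[] value = Await.result(future, deadline.timeLeft());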