Example 31 with Deadline

Use of org.apache.flink.api.common.time.Deadline in project flink by apache.

From class BlobsCleanupITCase, method testBlobServerCleanup.

private void testBlobServerCleanup(final TestCase testCase) throws Exception {
    final MiniCluster miniCluster = miniClusterResource.getMiniCluster();
    final int numTasks = 2;
    final Deadline timeout = Deadline.fromNow(Duration.ofSeconds(30L));
    final JobGraph jobGraph = createJobGraph(testCase, numTasks);
    final JobID jid = jobGraph.getJobID();
    // upload a blob
    final File tempBlob = File.createTempFile("Required", ".jar");
    final int blobPort = miniCluster.getClusterInformation().getBlobServerPort();
    List<PermanentBlobKey> keys = BlobClient.uploadFiles(new InetSocketAddress("localhost", blobPort), configuration, jid, Collections.singletonList(new Path(tempBlob.getAbsolutePath())));
    assertThat(keys, hasSize(1));
    jobGraph.addUserJarBlobKey(keys.get(0));
    if (testCase == TestCase.JOB_SUBMISSION_FAILS) {
        // add an invalid key so that the submission fails
        jobGraph.addUserJarBlobKey(new PermanentBlobKey());
    }
    final CompletableFuture<JobSubmissionResult> submissionFuture = miniCluster.submitJob(jobGraph);
    if (testCase == TestCase.JOB_SUBMISSION_FAILS) {
        try {
            submissionFuture.get();
            fail("Expected job submission failure.");
        } catch (ExecutionException e) {
            assertThat(ExceptionUtils.findThrowable(e, JobSubmissionException.class).isPresent(), is(true));
        }
    } else {
        final JobSubmissionResult jobSubmissionResult = submissionFuture.get();
        assertThat(jobSubmissionResult.getJobID(), is(jid));
        final CompletableFuture<JobResult> resultFuture = miniCluster.requestJobResult(jid);
        if (testCase == TestCase.JOB_FAILS) {
            // fail a task so that the job is going to be recovered (we actually do not
            // need the blocking part of the invokable and can start throwing right away)
            FailingBlockingInvokable.unblock();
            // job will get restarted, BlobCache may re-download the BLOB if already deleted
            // then the tasks will fail again and the restart strategy will finalise the job
            final JobResult jobResult = resultFuture.get();
            assertThat(jobResult.isSuccess(), is(false));
            assertThat(jobResult.getApplicationStatus(), is(ApplicationStatus.FAILED));
        } else if (testCase == TestCase.JOB_IS_CANCELLED) {
            miniCluster.cancelJob(jid);
            final JobResult jobResult = resultFuture.get();
            assertThat(jobResult.isSuccess(), is(false));
            assertThat(jobResult.getApplicationStatus(), is(ApplicationStatus.CANCELED));
        } else {
            final JobResult jobResult = resultFuture.get();
            Throwable cause = jobResult.getSerializedThrowable().map(throwable -> throwable.deserializeError(getClass().getClassLoader())).orElse(null);
            assertThat(ExceptionUtils.stringifyException(cause), jobResult.isSuccess(), is(true));
        }
    }
    // both BlobServer and BlobCache should eventually delete all files
    File[] blobDirs = blobBaseDir.listFiles((dir, name) -> name.startsWith("blobStore-"));
    assertNotNull(blobDirs);
    for (File blobDir : blobDirs) {
        waitForEmptyBlobDir(blobDir, timeout.timeLeft());
    }
}
Also used : Path(org.apache.flink.core.fs.Path) JobResult(org.apache.flink.runtime.jobmaster.JobResult) InetSocketAddress(java.net.InetSocketAddress) Deadline(org.apache.flink.api.common.time.Deadline) JobSubmissionException(org.apache.flink.runtime.client.JobSubmissionException) JobSubmissionResult(org.apache.flink.api.common.JobSubmissionResult) JobGraph(org.apache.flink.runtime.jobgraph.JobGraph) PermanentBlobKey(org.apache.flink.runtime.blob.PermanentBlobKey) MiniCluster(org.apache.flink.runtime.minicluster.MiniCluster) ExecutionException(java.util.concurrent.ExecutionException) File(java.io.File) JobID(org.apache.flink.api.common.JobID)
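
The helper waitForEmptyBlobDir used above is not shown on this page. A minimal sketch of the deadline-bounded polling it performs might look like the following; the body and the 100 ms poll interval are assumptions for illustration, not the actual Flink test code:

import java.io.File;
import java.time.Duration;

import org.apache.flink.api.common.time.Deadline;

import static org.junit.Assert.fail;

class BlobDirPolling {

    // Hypothetical sketch: poll the directory until it is empty (or gone),
    // giving up once the remaining share of the test Deadline is used up.
    static void waitForEmptyBlobDir(File blobDir, Duration timeLeft) throws InterruptedException {
        final Deadline deadline = Deadline.fromNow(timeLeft);
        while (deadline.hasTimeLeft()) {
            final String[] files = blobDir.list();
            if (files == null || files.length == 0) {
                return; // cleanup finished
            }
            Thread.sleep(100); // assumed poll interval
        }
        fail("BLOB directory " + blobDir + " was not cleaned up before the deadline");
    }
}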

Example 32 with Deadline

Use of org.apache.flink.api.common.time.Deadline in project flink by apache.

From class RescalingITCase, method testSavepointRescalingPartitionedOperatorState.

/**
 * Tests rescaling of partitioned operator state. More specifically, we test the mechanism with
 * {@link ListCheckpointed} as it subsumes {@link
 * org.apache.flink.streaming.api.checkpoint.CheckpointedFunction}.
 */
public void testSavepointRescalingPartitionedOperatorState(boolean scaleOut, OperatorCheckpointMethod checkpointMethod) throws Exception {
    final int parallelism = scaleOut ? numSlots : numSlots / 2;
    final int parallelism2 = scaleOut ? numSlots / 2 : numSlots;
    final int maxParallelism = 13;
    Duration timeout = Duration.ofMinutes(3);
    Deadline deadline = Deadline.now().plus(timeout);
    ClusterClient<?> client = cluster.getClusterClient();
    int counterSize = Math.max(parallelism, parallelism2);
    if (checkpointMethod == OperatorCheckpointMethod.CHECKPOINTED_FUNCTION || checkpointMethod == OperatorCheckpointMethod.CHECKPOINTED_FUNCTION_BROADCAST) {
        PartitionedStateSource.checkCorrectSnapshot = new int[counterSize];
        PartitionedStateSource.checkCorrectRestore = new int[counterSize];
    } else {
        PartitionedStateSourceListCheckpointed.checkCorrectSnapshot = new int[counterSize];
        PartitionedStateSourceListCheckpointed.checkCorrectRestore = new int[counterSize];
    }
    try {
        JobGraph jobGraph = createJobGraphWithOperatorState(parallelism, maxParallelism, checkpointMethod);
        // make sure the job does not finish before we take the savepoint
        StateSourceBase.canFinishLatch = new CountDownLatch(1);
        final JobID jobID = jobGraph.getJobID();
        client.submitJob(jobGraph).get();
        // wait until the operator is started
        waitForAllTaskRunning(cluster.getMiniCluster(), jobGraph.getJobID(), false);
        // wait until the operator handles some data
        StateSourceBase.workStartedLatch.await();
        CompletableFuture<String> savepointPathFuture = FutureUtils.retryWithDelay(() -> client.triggerSavepoint(jobID, null, SavepointFormatType.CANONICAL), (int) deadline.timeLeft().getSeconds() / 10, Time.seconds(10), (throwable) -> true, TestingUtils.defaultScheduledExecutor());
        final String savepointPath = savepointPathFuture.get(deadline.timeLeft().toMillis(), TimeUnit.MILLISECONDS);
        // we took a savepoint, the job can finish now
        StateSourceBase.canFinishLatch.countDown();
        client.cancel(jobID).get();
        while (!getRunningJobs(client).isEmpty()) {
            Thread.sleep(50);
        }
        JobGraph scaledJobGraph = createJobGraphWithOperatorState(parallelism2, maxParallelism, checkpointMethod);
        scaledJobGraph.setSavepointRestoreSettings(SavepointRestoreSettings.forPath(savepointPath));
        submitJobAndWaitForResult(client, scaledJobGraph, getClass().getClassLoader());
        int sumExp = 0;
        int sumAct = 0;
        if (checkpointMethod == OperatorCheckpointMethod.CHECKPOINTED_FUNCTION) {
            for (int c : PartitionedStateSource.checkCorrectSnapshot) {
                sumExp += c;
            }
            for (int c : PartitionedStateSource.checkCorrectRestore) {
                sumAct += c;
            }
        } else if (checkpointMethod == OperatorCheckpointMethod.CHECKPOINTED_FUNCTION_BROADCAST) {
            for (int c : PartitionedStateSource.checkCorrectSnapshot) {
                sumExp += c;
            }
            for (int c : PartitionedStateSource.checkCorrectRestore) {
                sumAct += c;
            }
            sumExp *= parallelism2;
        } else {
            for (int c : PartitionedStateSourceListCheckpointed.checkCorrectSnapshot) {
                sumExp += c;
            }
            for (int c : PartitionedStateSourceListCheckpointed.checkCorrectRestore) {
                sumAct += c;
            }
        }
        assertEquals(sumExp, sumAct);
    } finally {
    }
}
Also used : JobGraph(org.apache.flink.runtime.jobgraph.JobGraph) Deadline(org.apache.flink.api.common.time.Deadline) Duration(java.time.Duration) CountDownLatch(java.util.concurrent.CountDownLatch) JobID(org.apache.flink.api.common.JobID)
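
Note how the retry budget for triggerSavepoint is derived from the deadline: the remaining time is divided by the fixed 10-second retry delay, so the retries roughly exhaust whatever budget is left. A self-contained sketch of that calculation (the 3-minute budget mirrors the test above):

import java.time.Duration;

import org.apache.flink.api.common.time.Deadline;

class RetryBudget {

    // Remaining time divided by the delay between attempts gives the
    // number of retries that fit into the deadline.
    static int retriesFor(Deadline deadline, Duration retryDelay) {
        return (int) (deadline.timeLeft().getSeconds() / retryDelay.getSeconds());
    }

    public static void main(String[] args) {
        Deadline deadline = Deadline.now().plus(Duration.ofMinutes(3));
        System.out.println(retriesFor(deadline, Duration.ofSeconds(10))); // ~18 for a fresh deadline
    }
}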

Example 33 with Deadline

Use of org.apache.flink.api.common.time.Deadline in project flink by apache.

From class SavepointITCase, method testCanRestoreWithModifiedStatelessOperators.

/**
 * FLINK-5985
 *
 * <p>This test ensures we can restore from a savepoint under modifications to the job graph
 * that only concern stateless operators.
 */
@Test
public void testCanRestoreWithModifiedStatelessOperators() throws Exception {
    // Config
    int numTaskManagers = 2;
    int numSlotsPerTaskManager = 2;
    int parallelism = 2;
    // Test deadline
    final Deadline deadline = Deadline.now().plus(Duration.ofMinutes(5));
    // Flink configuration
    final Configuration config = new Configuration();
    config.setString(CheckpointingOptions.SAVEPOINT_DIRECTORY, savepointDir.toURI().toString());
    String savepointPath;
    LOG.info("Flink configuration: " + config + ".");
    // Start Flink
    MiniClusterWithClientResource cluster = new MiniClusterWithClientResource(new MiniClusterResourceConfiguration.Builder().setConfiguration(config).setNumberTaskManagers(numTaskManagers).setNumberSlotsPerTaskManager(numSlotsPerTaskManager).build());
    LOG.info("Shutting down Flink cluster.");
    cluster.before();
    ClusterClient<?> client = cluster.getClusterClient();
    try {
        final StatefulCounter statefulCounter = new StatefulCounter();
        StatefulCounter.resetForTest(parallelism);
        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
        env.setParallelism(parallelism);
        env.addSource(new InfiniteTestSource()).shuffle().map(value -> 4 * value).shuffle().map(statefulCounter).uid("statefulCounter").shuffle().map(value -> 2 * value).addSink(new DiscardingSink<>());
        JobGraph originalJobGraph = env.getStreamGraph().getJobGraph();
        JobID jobID = client.submitJob(originalJobGraph).get();
        // wait for the Tasks to be ready
        waitForAllTaskRunning(cluster.getMiniCluster(), jobID, false);
        assertTrue(StatefulCounter.getProgressLatch().await(deadline.timeLeft().toMillis(), TimeUnit.MILLISECONDS));
        savepointPath = client.triggerSavepoint(jobID, null, SavepointFormatType.CANONICAL).get();
        LOG.info("Retrieved savepoint: " + savepointPath + ".");
    } finally {
        // Shut down the Flink cluster (thereby canceling the job)
        LOG.info("Shutting down Flink cluster.");
        cluster.after();
    }
    // create a new MiniCluster to make sure we start with completely
    // new resources
    cluster = new MiniClusterWithClientResource(new MiniClusterResourceConfiguration.Builder().setConfiguration(config).setNumberTaskManagers(numTaskManagers).setNumberSlotsPerTaskManager(numSlotsPerTaskManager).build());
    LOG.info("Restarting Flink cluster.");
    cluster.before();
    client = cluster.getClusterClient();
    try {
        // Reset static test helpers
        StatefulCounter.resetForTest(parallelism);
        // Build the modified job on a fresh environment
        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
        env.setParallelism(parallelism);
        // generate a modified job graph that adds a stateless op
        env.addSource(new InfiniteTestSource()).shuffle().map(new StatefulCounter()).uid("statefulCounter").shuffle().map(value -> value).addSink(new DiscardingSink<>());
        JobGraph modifiedJobGraph = env.getStreamGraph().getJobGraph();
        // Set the savepoint path
        modifiedJobGraph.setSavepointRestoreSettings(SavepointRestoreSettings.forPath(savepointPath));
        LOG.info("Resubmitting job " + modifiedJobGraph.getJobID() + " with " + "savepoint path " + savepointPath + " in detached mode.");
        // Submit the job
        client.submitJob(modifiedJobGraph).get();
        // Await state is restored
        assertTrue(StatefulCounter.getRestoreLatch().await(deadline.timeLeft().toMillis(), TimeUnit.MILLISECONDS));
        // Await some progress after restore
        assertTrue(StatefulCounter.getProgressLatch().await(deadline.timeLeft().toMillis(), TimeUnit.MILLISECONDS));
    } finally {
        cluster.after();
    }
}
Also used : Arrays(java.util.Arrays) SharedObjects(org.apache.flink.testutils.junit.SharedObjects) MemorySize(org.apache.flink.configuration.MemorySize) EmptyRequestBody(org.apache.flink.runtime.rest.messages.EmptyRequestBody) MiniClusterResourceConfiguration(org.apache.flink.runtime.testutils.MiniClusterResourceConfiguration) ExceptionUtils.findThrowable(org.apache.flink.util.ExceptionUtils.findThrowable) CheckpointException(org.apache.flink.runtime.checkpoint.CheckpointException) TestUtils.submitJobAndWaitForResult(org.apache.flink.test.util.TestUtils.submitJobAndWaitForResult) FSDataOutputStream(org.apache.flink.core.fs.FSDataOutputStream) CheckpointListener(org.apache.flink.api.common.state.CheckpointListener) Duration(java.time.Duration) Map(java.util.Map) StreamGraph(org.apache.flink.streaming.api.graph.StreamGraph) ExceptionUtils.assertThrowable(org.apache.flink.util.ExceptionUtils.assertThrowable) RichSourceFunction(org.apache.flink.streaming.api.functions.source.RichSourceFunction) Path(java.nio.file.Path) StateSnapshotContext(org.apache.flink.runtime.state.StateSnapshotContext) SinkFunction(org.apache.flink.streaming.api.functions.sink.SinkFunction) BoundedOneInput(org.apache.flink.streaming.api.operators.BoundedOneInput) FileSystemFactory(org.apache.flink.core.fs.FileSystemFactory) CountDownLatch(java.util.concurrent.CountDownLatch) JobMessageParameters(org.apache.flink.runtime.rest.messages.JobMessageParameters) Stream(java.util.stream.Stream) ValueState(org.apache.flink.api.common.state.ValueState) ClusterClient(org.apache.flink.client.program.ClusterClient) Assert.assertFalse(org.junit.Assert.assertFalse) OneInputStreamOperator(org.apache.flink.streaming.api.operators.OneInputStreamOperator) Time(org.apache.flink.api.common.time.Time) CopyOnWriteArrayList(java.util.concurrent.CopyOnWriteArrayList) FlinkException(org.apache.flink.util.FlinkException) LocalFileSystem(org.apache.flink.core.fs.local.LocalFileSystem) JobStatus(org.apache.flink.api.common.JobStatus) KeyedProcessFunction(org.apache.flink.streaming.api.functions.KeyedProcessFunction) TypeSafeDiagnosingMatcher(org.hamcrest.TypeSafeDiagnosingMatcher) TaskManagerOptions(org.apache.flink.configuration.TaskManagerOptions) SourceFunction(org.apache.flink.streaming.api.functions.source.SourceFunction) FutureUtils(org.apache.flink.util.concurrent.FutureUtils) RichMapFunction(org.apache.flink.api.common.functions.RichMapFunction) Collector(org.apache.flink.util.Collector) JobExecutionException(org.apache.flink.runtime.client.JobExecutionException) Before(org.junit.Before) MiniClusterWithClientResource(org.apache.flink.test.util.MiniClusterWithClientResource) Files(java.nio.file.Files) ValueStateDescriptor(org.apache.flink.api.common.state.ValueStateDescriptor) ExecutionState(org.apache.flink.runtime.execution.ExecutionState) Assert.assertTrue(org.junit.Assert.assertTrue) Test(org.junit.Test) IOException(java.io.IOException) FSDataInputStream(org.apache.flink.core.fs.FSDataInputStream) File(java.io.File) AbstractStreamOperator(org.apache.flink.streaming.api.operators.AbstractStreamOperator) ExecutionException(java.util.concurrent.ExecutionException) JobID(org.apache.flink.api.common.JobID) Paths(java.nio.file.Paths) Matcher(org.hamcrest.Matcher) Assert(org.junit.Assert) SavepointRestoreSettings(org.apache.flink.runtime.jobgraph.SavepointRestoreSettings) Assert.assertEquals(org.junit.Assert.assertEquals) StateBackendOptions(org.apache.flink.configuration.StateBackendOptions) 
EntropyInjectingTestFileSystem(org.apache.flink.testutils.EntropyInjectingTestFileSystem) Deadline(org.apache.flink.api.common.time.Deadline) ExceptionUtils.findThrowableWithMessage(org.apache.flink.util.ExceptionUtils.findThrowableWithMessage) ClusterOptions(org.apache.flink.configuration.ClusterOptions) FileUtils(org.apache.flink.util.FileUtils) URISyntaxException(java.net.URISyntaxException) BiFunction(java.util.function.BiFunction) JobGraph(org.apache.flink.runtime.jobgraph.JobGraph) LoggerFactory(org.slf4j.LoggerFactory) BlockingNoOpInvokable(org.apache.flink.runtime.testtasks.BlockingNoOpInvokable) Random(java.util.Random) FunctionSnapshotContext(org.apache.flink.runtime.state.FunctionSnapshotContext) EmbeddedRocksDBStateBackend(org.apache.flink.contrib.streaming.state.EmbeddedRocksDBStateBackend) MapFunction(org.apache.flink.api.common.functions.MapFunction) BasicTypeInfo(org.apache.flink.api.common.typeinfo.BasicTypeInfo) Assert.assertThat(org.junit.Assert.assertThat) ListState(org.apache.flink.api.common.state.ListState) CommonTestUtils.waitForAllTaskRunning(org.apache.flink.runtime.testutils.CommonTestUtils.waitForAllTaskRunning) ChainingStrategy(org.apache.flink.streaming.api.operators.ChainingStrategy) TestLogger(org.apache.flink.util.TestLogger) ListStateDescriptor(org.apache.flink.api.common.state.ListStateDescriptor) Assert.fail(org.junit.Assert.fail) URI(java.net.URI) KeySelector(org.apache.flink.api.java.functions.KeySelector) CheckpointedFunction(org.apache.flink.streaming.api.checkpoint.CheckpointedFunction) FunctionInitializationContext(org.apache.flink.runtime.state.FunctionInitializationContext) Collection(java.util.Collection) Collectors(java.util.stream.Collectors) FileNotFoundException(java.io.FileNotFoundException) CheckpointingOptions(org.apache.flink.configuration.CheckpointingOptions) Objects(java.util.Objects) TestingUtils(org.apache.flink.testutils.TestingUtils) List(java.util.List) FileSystem(org.apache.flink.core.fs.FileSystem) FlinkJobNotFoundException(org.apache.flink.runtime.messages.FlinkJobNotFoundException) Optional(java.util.Optional) CheckpointConfig(org.apache.flink.streaming.api.environment.CheckpointConfig) ParallelSourceFunction(org.apache.flink.streaming.api.functions.source.ParallelSourceFunction) StreamExecutionEnvironment(org.apache.flink.streaming.api.environment.StreamExecutionEnvironment) RichFlatMapFunction(org.apache.flink.api.common.functions.RichFlatMapFunction) OneShotLatch(org.apache.flink.core.testutils.OneShotLatch) SavepointFormatType(org.apache.flink.core.execution.SavepointFormatType) JobVertex(org.apache.flink.runtime.jobgraph.JobVertex) IterativeStream(org.apache.flink.streaming.api.datastream.IterativeStream) CompletableFuture(java.util.concurrent.CompletableFuture) RestartStrategies(org.apache.flink.api.common.restartstrategy.RestartStrategies) RestClusterClient(org.apache.flink.client.program.rest.RestClusterClient) RestoreMode(org.apache.flink.runtime.jobgraph.RestoreMode) StreamRecord(org.apache.flink.streaming.runtime.streamrecord.StreamRecord) CompletableFuture.allOf(java.util.concurrent.CompletableFuture.allOf) JobGraphTestUtils(org.apache.flink.runtime.jobgraph.JobGraphTestUtils) JobDetailsHeaders(org.apache.flink.runtime.rest.messages.job.JobDetailsHeaders) SharedReference(org.apache.flink.testutils.junit.SharedReference) Description(org.hamcrest.Description) Logger(org.slf4j.Logger) LocalRecoverableWriter(org.apache.flink.core.fs.local.LocalRecoverableWriter) 
DiscardingSink(org.apache.flink.streaming.api.functions.sink.DiscardingSink) Assert.assertNotNull(org.junit.Assert.assertNotNull) Configuration(org.apache.flink.configuration.Configuration) ExceptionUtils.assertThrowableWithMessage(org.apache.flink.util.ExceptionUtils.assertThrowableWithMessage) DataStream(org.apache.flink.streaming.api.datastream.DataStream) TimeUnit(java.util.concurrent.TimeUnit) Rule(org.junit.Rule) Ignore(org.junit.Ignore) ListCheckpointed(org.apache.flink.streaming.api.checkpoint.ListCheckpointed) FileVisitOption(java.nio.file.FileVisitOption) CommonTestUtils(org.apache.flink.runtime.testutils.CommonTestUtils) Collections(java.util.Collections) TemporaryFolder(org.junit.rules.TemporaryFolder)
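
A single five-minute Deadline bounds every blocking wait in this test: each call to deadline.timeLeft() returns only what remains of the original budget, so the successive awaits cannot add up to more than five minutes in total. A minimal sketch of the pattern, with plain latches standing in for StatefulCounter's restore and progress latches:

import java.time.Duration;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.TimeUnit;

import org.apache.flink.api.common.time.Deadline;

class SharedDeadlineWaits {

    public static void main(String[] args) throws InterruptedException {
        Deadline deadline = Deadline.now().plus(Duration.ofMinutes(5));
        CountDownLatch restoreLatch = new CountDownLatch(0); // stand-in, already released
        CountDownLatch progressLatch = new CountDownLatch(0); // stand-in, already released
        // Each wait consumes from the same shrinking budget.
        boolean restored = restoreLatch.await(deadline.timeLeft().toMillis(), TimeUnit.MILLISECONDS);
        boolean progressed = progressLatch.await(deadline.timeLeft().toMillis(), TimeUnit.MILLISECONDS);
        System.out.println(restored && progressed);
    }
}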

Example 34 with Deadline

Use of org.apache.flink.api.common.time.Deadline in project flink by apache.

From class UnalignedCheckpointTestBase, method waitForCleanShutdown.

private void waitForCleanShutdown() throws InterruptedException {
    // slow down when half the memory is taken and wait for gc
    if (PlatformDependent.usedDirectMemory() > PlatformDependent.maxDirectMemory() / 2) {
        final Duration waitTime = Duration.ofSeconds(10);
        Deadline deadline = Deadline.fromNow(waitTime);
        while (PlatformDependent.usedDirectMemory() > 0 && deadline.hasTimeLeft()) {
            System.gc();
            Thread.sleep(100);
        }
        final Duration timeLeft = deadline.timeLeft();
        if (timeLeft.isNegative()) {
            LOG.warn("Waited 10s for clean shutdown of previous runs but there is still direct memory in use: " + PlatformDependent.usedDirectMemory());
        } else {
            LOG.info("Needed to wait {} ms for full cleanup of previous runs.", waitTime.minus(timeLeft).toMillis());
        }
    }
}
Also used : Deadline(org.apache.flink.api.common.time.Deadline) Duration(java.time.Duration)
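
Two details of the Deadline API are visible here: timeLeft() keeps counting and returns a negative Duration once the deadline has passed (checked above with isNegative(); isOverdue() is the equivalent convenience method), and subtracting timeLeft() from the original budget yields the elapsed time. A small sketch:

import java.time.Duration;

import org.apache.flink.api.common.time.Deadline;

class DeadlineAsStopwatch {

    public static void main(String[] args) throws InterruptedException {
        Duration budget = Duration.ofSeconds(10);
        Deadline deadline = Deadline.fromNow(budget);
        Thread.sleep(250); // stand-in for the work being timed
        Duration elapsed = budget.minus(deadline.timeLeft());
        System.out.println("overdue=" + deadline.isOverdue() + " elapsedMs=" + elapsed.toMillis());
    }
}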

Example 35 with Deadline

Use of org.apache.flink.api.common.time.Deadline in project flink by apache.

From class KinesisTableApiITCase, method readAllOrdersFromKinesis.

private List<Order> readAllOrdersFromKinesis(final KinesisPubsubClient client) throws Exception {
    Deadline deadline = Deadline.fromNow(Duration.ofSeconds(5));
    List<Order> orders;
    do {
        Thread.sleep(1000);
        orders = client.readAllMessages(LARGE_ORDERS_STREAM).stream().map(order -> fromJson(order, Order.class)).collect(Collectors.toList());
    } while (deadline.hasTimeLeft() && orders.size() < 3);
    return orders;
}
Also used : Order(org.apache.flink.streaming.kinesis.test.model.Order) Deadline(org.apache.flink.api.common.time.Deadline)
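
Because the loop sleeps before it checks, it always polls at least once and can overrun the 5-second deadline by up to one interval; the method returns whatever has arrived and leaves the assertion to the caller. A generic sketch of the same shape, where fetchBatch is a hypothetical stand-in for client.readAllMessages:

import java.time.Duration;
import java.util.ArrayList;
import java.util.List;

import org.apache.flink.api.common.time.Deadline;

class PollUntil {

    // Poll until enough results have arrived or the deadline expires.
    static List<String> pollUntil(int expected, Duration timeout) throws InterruptedException {
        Deadline deadline = Deadline.fromNow(timeout);
        List<String> results = new ArrayList<>();
        do {
            Thread.sleep(1000); // sleep first, so one extra interval past the deadline is possible
            results = fetchBatch();
        } while (deadline.hasTimeLeft() && results.size() < expected);
        return results;
    }

    static List<String> fetchBatch() {
        return new ArrayList<>(); // hypothetical source read
    }

    public static void main(String[] args) throws InterruptedException {
        System.out.println(pollUntil(3, Duration.ofSeconds(5)).size());
    }
}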

Aggregations

Deadline (org.apache.flink.api.common.time.Deadline): 75
Test (org.junit.Test): 34
JobID (org.apache.flink.api.common.JobID): 29
JobGraph (org.apache.flink.runtime.jobgraph.JobGraph): 26
Duration (java.time.Duration): 19
Configuration (org.apache.flink.configuration.Configuration): 15
StreamExecutionEnvironment (org.apache.flink.streaming.api.environment.StreamExecutionEnvironment): 14
Tuple2 (org.apache.flink.api.java.tuple.Tuple2): 13
IOException (java.io.IOException): 12
ExecutionException (java.util.concurrent.ExecutionException): 12
KeySelector (org.apache.flink.api.java.functions.KeySelector): 12
AtomicLong (java.util.concurrent.atomic.AtomicLong): 11
MiniCluster (org.apache.flink.runtime.minicluster.MiniCluster): 10
File (java.io.File): 9
TimeUnit (java.util.concurrent.TimeUnit): 9
JobStatus (org.apache.flink.api.common.JobStatus): 9
List (java.util.List): 8
Test (org.junit.jupiter.api.Test): 8
CompletableFuture (java.util.concurrent.CompletableFuture): 7
CountDownLatch (java.util.concurrent.CountDownLatch): 7