
Example 51 with Deadline

Use of org.apache.flink.api.common.time.Deadline in project flink by apache.

The class FlinkKinesisConsumerTest, method testSourceSynchronization.

@Test
public void testSourceSynchronization() throws Exception {
    final String streamName = "fakeStreamName";
    final Time maxOutOfOrderness = Time.milliseconds(5);
    final long autoWatermarkInterval = 1_000;
    final long watermarkSyncInterval = autoWatermarkInterval + 1;
    TestWatermarkTracker.WATERMARK.set(0);
    HashMap<String, String> subscribedStreamsToLastDiscoveredShardIds = new HashMap<>();
    subscribedStreamsToLastDiscoveredShardIds.put(streamName, null);
    final KinesisDeserializationSchema<String> deserializationSchema = new KinesisDeserializationSchemaWrapper<>(new OpenCheckingStringSchema());
    Properties props = new Properties();
    props.setProperty(ConsumerConfigConstants.AWS_REGION, "us-east-1");
    props.setProperty(ConsumerConfigConstants.SHARD_GETRECORDS_INTERVAL_MILLIS, Long.toString(10L));
    props.setProperty(ConsumerConfigConstants.WATERMARK_SYNC_MILLIS, Long.toString(watermarkSyncInterval));
    props.setProperty(ConsumerConfigConstants.WATERMARK_LOOKAHEAD_MILLIS, Long.toString(5));
    BlockingQueue<String> shard1 = new LinkedBlockingQueue<>();
    Map<String, List<BlockingQueue<String>>> streamToQueueMap = new HashMap<>();
    streamToQueueMap.put(streamName, Collections.singletonList(shard1));
    // override createFetcher to mock Kinesis
    FlinkKinesisConsumer<String> sourceFunc = new FlinkKinesisConsumer<String>(streamName, deserializationSchema, props) {

        @Override
        protected KinesisDataFetcher<String> createFetcher(
                List<String> streams,
                SourceFunction.SourceContext<String> sourceContext,
                RuntimeContext runtimeContext,
                Properties configProps,
                KinesisDeserializationSchema<String> deserializationSchema) {
            KinesisDataFetcher<String> fetcher = new KinesisDataFetcher<String>(
                    streams,
                    sourceContext,
                    sourceContext.getCheckpointLock(),
                    runtimeContext,
                    configProps,
                    deserializationSchema,
                    getShardAssigner(),
                    getPeriodicWatermarkAssigner(),
                    getWatermarkTracker(),
                    new AtomicReference<>(),
                    new ArrayList<>(),
                    subscribedStreamsToLastDiscoveredShardIds,
                    (props) -> FakeKinesisBehavioursFactory.blockingQueueGetRecords(streamToQueueMap),
                    null) {

                @Override
                protected void emitWatermark() {
                    // emit under the checkpoint lock, as the watermark timer callback would
                    synchronized (sourceContext.getCheckpointLock()) {
                        super.emitWatermark();
                    }
                }
            };
            return fetcher;
        }
    };
    sourceFunc.setShardAssigner((streamShardHandle, i) -> {
        // shardId-000000000000
        return Integer.parseInt(streamShardHandle.getShard().getShardId().substring("shardId-".length()));
    });
    sourceFunc.setPeriodicWatermarkAssigner(new TestTimestampExtractor(maxOutOfOrderness));
    sourceFunc.setWatermarkTracker(new TestWatermarkTracker());
    // there is currently no test harness specifically for sources,
    // so we overlay the source thread here
    AbstractStreamOperatorTestHarness<Object> testHarness =
            new AbstractStreamOperatorTestHarness<Object>(new StreamSource(sourceFunc), 1, 1, 0);
    testHarness.setTimeCharacteristic(TimeCharacteristic.EventTime);
    testHarness.getExecutionConfig().setAutoWatermarkInterval(autoWatermarkInterval);
    testHarness.initializeEmptyState();
    testHarness.open();
    final ConcurrentLinkedQueue<Object> results = testHarness.getOutput();
    final AtomicBoolean throwOnCollect = new AtomicBoolean();
    @SuppressWarnings("unchecked")
    SourceFunction.SourceContext<String> sourceContext =
            new CollectingSourceContext(testHarness.getCheckpointLock(), results) {

        @Override
        public void markAsTemporarilyIdle() {
        }

        @Override
        public void collect(Serializable element) {
            if (throwOnCollect.get()) {
                throw new RuntimeException("expected");
            }
            super.collect(element);
        }

        @Override
        public void emitWatermark(Watermark mark) {
            results.add(mark);
        }
    };
    final AtomicReference<Exception> sourceThreadError = new AtomicReference<>();
    new Thread(() -> {
        try {
            sourceFunc.run(sourceContext);
        } catch (InterruptedException e) {
            // expected on cancel
        } catch (Exception e) {
            sourceThreadError.set(e);
        }
    }).start();
    ArrayList<Object> expectedResults = new ArrayList<>();
    final long record1 = 1;
    shard1.put(Long.toString(record1));
    expectedResults.add(Long.toString(record1));
    awaitRecordCount(results, expectedResults.size());
    // at this point we know the fetcher was initialized
    final KinesisDataFetcher fetcher = org.powermock.reflect.Whitebox.getInternalState(sourceFunc, "fetcher");
    // trigger watermark emit
    testHarness.setProcessingTime(testHarness.getProcessingTime() + autoWatermarkInterval);
    expectedResults.add(new Watermark(-4));
    // verify watermark
    awaitRecordCount(results, expectedResults.size());
    assertThat(results, org.hamcrest.Matchers.contains(expectedResults.toArray()));
    assertEquals(0, TestWatermarkTracker.WATERMARK.get());
    // trigger sync
    testHarness.setProcessingTime(testHarness.getProcessingTime() + 1);
    TestWatermarkTracker.assertGlobalWatermark(-4);
    final long record2 = record1 + (watermarkSyncInterval * 3) + 1;
    shard1.put(Long.toString(record2));
    // wait for the record to be buffered in the emitter
    final RecordEmitter<?> emitter = org.powermock.reflect.Whitebox.getInternalState(fetcher, "recordEmitter");
    RecordEmitter.RecordQueue emitterQueue = emitter.getQueue(0);
    Deadline deadline = Deadline.fromNow(Duration.ofSeconds(10));
    while (deadline.hasTimeLeft() && emitterQueue.getSize() < 1) {
        Thread.sleep(10);
    }
    assertEquals("first record received", 1, emitterQueue.getSize());
    // Advance the watermark. Since the new record is past global watermark + threshold,
    // it won't be emitted and the watermark does not advance
    testHarness.setProcessingTime(testHarness.getProcessingTime() + autoWatermarkInterval);
    assertThat(results, org.hamcrest.Matchers.contains(expectedResults.toArray()));
    assertEquals(3000L, (long) org.powermock.reflect.Whitebox.getInternalState(fetcher, "nextWatermark"));
    TestWatermarkTracker.assertGlobalWatermark(-4);
    // Trigger global watermark sync
    testHarness.setProcessingTime(testHarness.getProcessingTime() + 1);
    expectedResults.add(Long.toString(record2));
    awaitRecordCount(results, expectedResults.size());
    assertThat(results, org.hamcrest.Matchers.contains(expectedResults.toArray()));
    TestWatermarkTracker.assertGlobalWatermark(3000);
    // Trigger watermark update and emit
    testHarness.setProcessingTime(testHarness.getProcessingTime() + autoWatermarkInterval);
    expectedResults.add(new Watermark(3000));
    assertThat(results, org.hamcrest.Matchers.contains(expectedResults.toArray()));
    // verify exception propagation
    Assert.assertNull(sourceThreadError.get());
    throwOnCollect.set(true);
    shard1.put(Long.toString(record2 + 1));
    deadline = Deadline.fromNow(Duration.ofSeconds(10));
    while (deadline.hasTimeLeft() && sourceThreadError.get() == null) {
        Thread.sleep(10);
    }
    Assert.assertNotNull(sourceThreadError.get());
    Assert.assertNotNull("expected", sourceThreadError.get().getMessage());
    sourceFunc.cancel();
    testHarness.close();
}
Also used : Serializable(java.io.Serializable) HashMap(java.util.HashMap) ArrayList(java.util.ArrayList) Time(org.apache.flink.streaming.api.windowing.time.Time) Properties(java.util.Properties) LinkedBlockingQueue(java.util.concurrent.LinkedBlockingQueue) CollectingSourceContext(org.apache.flink.streaming.util.CollectingSourceContext) AbstractStreamOperatorTestHarness(org.apache.flink.streaming.util.AbstractStreamOperatorTestHarness) List(java.util.List) KinesisDeserializationSchema(org.apache.flink.streaming.connectors.kinesis.serialization.KinesisDeserializationSchema) SourceFunction(org.apache.flink.streaming.api.functions.source.SourceFunction) StreamSource(org.apache.flink.streaming.api.operators.StreamSource) Deadline(org.apache.flink.api.common.time.Deadline) AtomicReference(java.util.concurrent.atomic.AtomicReference) KinesisDeserializationSchemaWrapper(org.apache.flink.streaming.connectors.kinesis.serialization.KinesisDeserializationSchemaWrapper) KinesisDataFetcher(org.apache.flink.streaming.connectors.kinesis.internals.KinesisDataFetcher) RecordEmitter(org.apache.flink.streaming.connectors.kinesis.util.RecordEmitter) AtomicBoolean(java.util.concurrent.atomic.AtomicBoolean) TestableFlinkKinesisConsumer(org.apache.flink.streaming.connectors.kinesis.testutils.TestableFlinkKinesisConsumer) RuntimeContext(org.apache.flink.api.common.functions.RuntimeContext) Watermark(org.apache.flink.streaming.api.watermark.Watermark) PrepareForTest(org.powermock.core.classloader.annotations.PrepareForTest) Test(org.junit.Test)
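All of the examples on this page share one idiom: create a Deadline from a Duration, then poll inside a while (deadline.hasTimeLeft()) loop. For reference, here is a minimal, self-contained sketch of that idiom. The helper name awaitCondition and its parameters are illustrative only; Deadline.fromNow, hasTimeLeft and timeLeft are the actual API of org.apache.flink.api.common.time.Deadline.

import java.time.Duration;
import java.util.function.Supplier;
import org.apache.flink.api.common.time.Deadline;

public final class DeadlinePollingSketch {

    // Polls the condition every pollMillis until it holds or the deadline expires.
    // Returns the final state of the condition so the caller can assert on it.
    static boolean awaitCondition(Supplier<Boolean> condition, Duration timeout, long pollMillis)
            throws InterruptedException {
        Deadline deadline = Deadline.fromNow(timeout);
        while (deadline.hasTimeLeft()) {
            if (condition.get()) {
                return true;
            }
            // never sleep past the deadline, and never pass a negative value to sleep
            long sleepMillis = Math.min(pollMillis, Math.max(0L, deadline.timeLeft().toMillis()));
            Thread.sleep(sleepMillis);
        }
        return condition.get();
    }
}

With such a helper, the two hand-rolled loops in the test above (waiting for the emitter queue and waiting for the source-thread error) each collapse into a single call.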

Example 52 with Deadline

Use of org.apache.flink.api.common.time.Deadline in project flink by apache.

The class SQLClientSchemaRegistryITCase, method getAllVersions.

private List<Integer> getAllVersions(String behaviourSubject) throws Exception {
    Deadline deadline = Deadline.fromNow(Duration.ofSeconds(120));
    Exception ex = new IllegalStateException("Could not query schema registry. Negative deadline provided.");
    // retry until the registry responds or the deadline expires
    while (deadline.hasTimeLeft()) {
        try {
            return registryClient.getAllVersions(behaviourSubject);
        } catch (RestClientException e) {
            ex = e;
        }
    }
    throw ex;
}
Also used : Deadline(org.apache.flink.api.common.time.Deadline) RestClientException(io.confluent.kafka.schemaregistry.client.rest.exceptions.RestClientException)
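The loop above retries as fast as the client can fail, which is fine for a local registry but can flood a remote one. Below is a hedged variant with a short pause between attempts; it assumes the same registryClient field as the test, and the 100 ms pause is an assumption, not part of the original code.

private List<Integer> getAllVersionsWithPause(String behaviourSubject) throws Exception {
    Deadline deadline = Deadline.fromNow(Duration.ofSeconds(120));
    Exception ex = new IllegalStateException("Could not query schema registry. Negative deadline provided.");
    while (deadline.hasTimeLeft()) {
        try {
            return registryClient.getAllVersions(behaviourSubject);
        } catch (RestClientException e) {
            ex = e;
            // assumed pause between attempts; the original test retries immediately
            Thread.sleep(100);
        }
    }
    throw ex;
}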

Example 53 with Deadline

Use of org.apache.flink.api.common.time.Deadline in project flink by apache.

The class GlueSchemaRegistryJsonKinesisITCase, method testGSRJsonGenericFormatWithFlink.

@Test
public void testGSRJsonGenericFormatWithFlink() throws Exception {
    List<JsonDataWithSchema> messages = getGenericRecords();
    for (JsonDataWithSchema msg : messages) {
        kinesisClient.sendMessage(msg.getSchema(), INPUT_STREAM, msg);
    }
    log.info("generated records");
    StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
    env.setParallelism(1);
    DataStream<JsonDataWithSchema> input = env.addSource(createSource());
    input.addSink(createSink());
    env.executeAsync();
    Deadline deadline = Deadline.fromNow(Duration.ofSeconds(60));
    List<Object> results = kinesisClient.readAllMessages(OUTPUT_STREAM);
    while (deadline.hasTimeLeft() && results.size() < messages.size()) {
        log.info("waiting for results..");
        Thread.sleep(1000);
        results = kinesisClient.readAllMessages(OUTPUT_STREAM);
    }
    log.info("results: {}", results);
    assertThat(results).containsExactlyInAnyOrderElementsOf(messages);
}
Also used : Deadline(org.apache.flink.api.common.time.Deadline) StreamExecutionEnvironment(org.apache.flink.streaming.api.environment.StreamExecutionEnvironment) JsonDataWithSchema(com.amazonaws.services.schemaregistry.serializers.json.JsonDataWithSchema) Test(org.junit.Test)
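The read-sleep-reread loop above is a common shape for integration tests that consume from an external system. A generic, hedged form of it follows; the helper name is illustrative, a Supplier stands in for kinesisClient.readAllMessages, and java.util.List, java.util.function.Supplier, java.time.Duration, and Flink's Deadline are assumed to be imported.

static <T> List<T> pollUntilCount(Supplier<List<T>> reader, int expectedCount, Duration timeout)
        throws InterruptedException {
    Deadline deadline = Deadline.fromNow(timeout);
    List<T> results = reader.get();
    while (deadline.hasTimeLeft() && results.size() < expectedCount) {
        Thread.sleep(1000);
        results = reader.get();
    }
    // may still be short of expectedCount if the deadline expired; the caller asserts on it
    return results;
}

The test above would then read: results = pollUntilCount(() -> kinesisClient.readAllMessages(OUTPUT_STREAM), messages.size(), Duration.ofSeconds(60));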

Example 54 with Deadline

Use of org.apache.flink.api.common.time.Deadline in project flink by apache.

The class ApplicationDispatcherBootstrapITCase, method testDirtyJobResultRecoveryInApplicationMode.

@Test
public void testDirtyJobResultRecoveryInApplicationMode() throws Exception {
    final Deadline deadline = Deadline.fromNow(TIMEOUT);
    final Configuration configuration = new Configuration();
    configuration.set(HighAvailabilityOptions.HA_MODE, HighAvailabilityMode.ZOOKEEPER.name());
    configuration.set(DeploymentOptions.TARGET, EmbeddedExecutor.NAME);
    configuration.set(ClientOptions.CLIENT_RETRY_PERIOD, Duration.ofMillis(100));
    final TestingMiniClusterConfiguration clusterConfiguration = TestingMiniClusterConfiguration.newBuilder().setConfiguration(configuration).build();
    // having a dirty entry in the JobResultStore should make the ApplicationDispatcherBootstrap
    // implementation fail to submit the job
    final JobResultStore jobResultStore = new EmbeddedJobResultStore();
    jobResultStore.createDirtyResult(new JobResultEntry(TestingJobResultStore.createSuccessfulJobResult(ApplicationDispatcherBootstrap.ZERO_JOB_ID)));
    final EmbeddedHaServicesWithLeadershipControl haServices = new EmbeddedHaServicesWithLeadershipControl(TestingUtils.defaultExecutor()) {

        @Override
        public JobResultStore getJobResultStore() {
            return jobResultStore;
        }
    };
    final TestingMiniCluster.Builder clusterBuilder =
            TestingMiniCluster.newBuilder(clusterConfiguration)
                    .setHighAvailabilityServicesSupplier(() -> haServices)
                    .setDispatcherResourceManagerComponentFactorySupplier(
                            createApplicationModeDispatcherResourceManagerComponentFactorySupplier(
                                    clusterConfiguration.getConfiguration(),
                                    ErrorHandlingSubmissionJob.createPackagedProgram()));
    try (final MiniCluster cluster = clusterBuilder.build()) {
        // start mini cluster and submit the job
        cluster.start();
        // the cluster should shut down automatically once the application completes
        awaitClusterStopped(cluster, deadline);
    }
    FlinkAssertions.assertThatChainOfCauses(ErrorHandlingSubmissionJob.getSubmissionException())
            .as("The job's main method shouldn't have succeeded due to a DuplicateJobSubmissionException.")
            .hasAtLeastOneElementOfType(DuplicateJobSubmissionException.class);
    assertThat(jobResultStore.hasDirtyJobResultEntry(ApplicationDispatcherBootstrap.ZERO_JOB_ID)).isFalse();
    assertThat(jobResultStore.hasCleanJobResultEntry(ApplicationDispatcherBootstrap.ZERO_JOB_ID)).isTrue();
}
Also used : TestingMiniCluster(org.apache.flink.runtime.minicluster.TestingMiniCluster) TestingMiniClusterConfiguration(org.apache.flink.runtime.minicluster.TestingMiniClusterConfiguration) Configuration(org.apache.flink.configuration.Configuration) Deadline(org.apache.flink.api.common.time.Deadline) JobResultEntry(org.apache.flink.runtime.highavailability.JobResultEntry) EmbeddedHaServicesWithLeadershipControl(org.apache.flink.runtime.highavailability.nonha.embedded.EmbeddedHaServicesWithLeadershipControl) MiniCluster(org.apache.flink.runtime.minicluster.MiniCluster) EmbeddedJobResultStore(org.apache.flink.runtime.highavailability.nonha.embedded.EmbeddedJobResultStore) TestingJobResultStore(org.apache.flink.runtime.testutils.TestingJobResultStore) JobResultStore(org.apache.flink.runtime.highavailability.JobResultStore) Test(org.junit.jupiter.api.Test)
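The awaitClusterStopped helper is not shown on this page. Below is a plausible implementation under the assumption that MiniCluster#isRunning() serves as the stop signal; this is a sketch with an assumed poll interval, not the test's actual helper.

private static void awaitClusterStopped(MiniCluster cluster, Deadline deadline) throws Exception {
    while (deadline.hasTimeLeft() && cluster.isRunning()) {
        // assumed poll interval
        Thread.sleep(50);
    }
    if (cluster.isRunning()) {
        throw new AssertionError("MiniCluster did not shut down before the deadline expired.");
    }
}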

Example 55 with Deadline

Use of org.apache.flink.api.common.time.Deadline in project flink by apache.

The class ApplicationDispatcherBootstrapITCase, method testSubmitFailedJobOnApplicationError.

@Test
public void testSubmitFailedJobOnApplicationError() throws Exception {
    final Deadline deadline = Deadline.fromNow(TIMEOUT);
    final JobID jobId = new JobID();
    final Configuration configuration = new Configuration();
    configuration.set(HighAvailabilityOptions.HA_MODE, HighAvailabilityMode.ZOOKEEPER.name());
    configuration.set(DeploymentOptions.TARGET, EmbeddedExecutor.NAME);
    configuration.set(ClientOptions.CLIENT_RETRY_PERIOD, Duration.ofMillis(100));
    configuration.set(DeploymentOptions.SHUTDOWN_ON_APPLICATION_FINISH, false);
    configuration.set(DeploymentOptions.SUBMIT_FAILED_JOB_ON_APPLICATION_ERROR, true);
    configuration.set(PipelineOptionsInternal.PIPELINE_FIXED_JOB_ID, jobId.toHexString());
    final TestingMiniClusterConfiguration clusterConfiguration = TestingMiniClusterConfiguration.newBuilder().setConfiguration(configuration).build();
    final EmbeddedHaServicesWithLeadershipControl haServices = new EmbeddedHaServicesWithLeadershipControl(TestingUtils.defaultExecutor());
    final TestingMiniCluster.Builder clusterBuilder =
            TestingMiniCluster.newBuilder(clusterConfiguration)
                    .setHighAvailabilityServicesSupplier(() -> haServices)
                    .setDispatcherResourceManagerComponentFactorySupplier(
                            createApplicationModeDispatcherResourceManagerComponentFactorySupplier(
                                    clusterConfiguration.getConfiguration(), FailingJob.getProgram()));
    try (final MiniCluster cluster = clusterBuilder.build()) {
        // start mini cluster and submit the job
        cluster.start();
        // wait until the failed job has been submitted
        awaitJobStatus(cluster, jobId, JobStatus.FAILED, deadline);
        final ArchivedExecutionGraph graph = cluster.getArchivedExecutionGraph(jobId).get();
        assertThat(graph.getJobID()).isEqualTo(jobId);
        assertThat(graph.getJobName()).isEqualTo(ApplicationDispatcherBootstrap.FAILED_JOB_NAME);
        assertThat(graph.getFailureInfo())
                .isNotNull()
                .extracting(ErrorInfo::getException)
                .extracting(e -> e.deserializeError(Thread.currentThread().getContextClassLoader()))
                .satisfies(e -> assertThat(e)
                        .isInstanceOf(ProgramInvocationException.class)
                        .hasRootCauseInstanceOf(RuntimeException.class)
                        .hasRootCauseMessage(FailingJob.EXCEPTION_MESSAGE));
    }
}
Also used : TestingMiniCluster(org.apache.flink.runtime.minicluster.TestingMiniCluster) TestingMiniClusterConfiguration(org.apache.flink.runtime.minicluster.TestingMiniClusterConfiguration) Configuration(org.apache.flink.configuration.Configuration) Deadline(org.apache.flink.api.common.time.Deadline) ErrorInfo(org.apache.flink.runtime.executiongraph.ErrorInfo) ProgramInvocationException(org.apache.flink.client.program.ProgramInvocationException) EmbeddedHaServicesWithLeadershipControl(org.apache.flink.runtime.highavailability.nonha.embedded.EmbeddedHaServicesWithLeadershipControl) ArchivedExecutionGraph(org.apache.flink.runtime.executiongraph.ArchivedExecutionGraph) MiniCluster(org.apache.flink.runtime.minicluster.MiniCluster) JobID(org.apache.flink.api.common.JobID) FailingJob(org.apache.flink.client.testjar.FailingJob) DuplicateJobSubmissionException(org.apache.flink.runtime.client.DuplicateJobSubmissionException) Test(org.junit.jupiter.api.Test)
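awaitJobStatus is likewise a helper this page does not show. Here is a sketch built on MiniCluster#getJobStatus(JobID), which returns a CompletableFuture<JobStatus>; the poll interval and the handling of a not-yet-submitted job are assumptions, not the test's actual code.

private static void awaitJobStatus(MiniCluster cluster, JobID jobId, JobStatus expected, Deadline deadline)
        throws Exception {
    while (deadline.hasTimeLeft()) {
        try {
            if (cluster.getJobStatus(jobId).get() == expected) {
                return;
            }
        } catch (ExecutionException e) {
            // the job may not be submitted yet; keep polling until the deadline
        }
        Thread.sleep(50); // assumed poll interval
    }
    throw new AssertionError("Job " + jobId + " did not reach status " + expected + " in time.");
}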

Aggregations

Deadline (org.apache.flink.api.common.time.Deadline): 75
Test (org.junit.Test): 34
JobID (org.apache.flink.api.common.JobID): 29
JobGraph (org.apache.flink.runtime.jobgraph.JobGraph): 26
Duration (java.time.Duration): 19
Configuration (org.apache.flink.configuration.Configuration): 15
StreamExecutionEnvironment (org.apache.flink.streaming.api.environment.StreamExecutionEnvironment): 14
Tuple2 (org.apache.flink.api.java.tuple.Tuple2): 13
IOException (java.io.IOException): 12
ExecutionException (java.util.concurrent.ExecutionException): 12
KeySelector (org.apache.flink.api.java.functions.KeySelector): 12
AtomicLong (java.util.concurrent.atomic.AtomicLong): 11
MiniCluster (org.apache.flink.runtime.minicluster.MiniCluster): 10
File (java.io.File): 9
TimeUnit (java.util.concurrent.TimeUnit): 9
JobStatus (org.apache.flink.api.common.JobStatus): 9
List (java.util.List): 8
Test (org.junit.jupiter.api.Test): 8
CompletableFuture (java.util.concurrent.CompletableFuture): 7
CountDownLatch (java.util.concurrent.CountDownLatch): 7