
Example 56 with Deadline

Use of org.apache.flink.api.common.time.Deadline in project flink by apache.

Class PulsarPartitionSplitReaderBase, method fetch.

@Override
public RecordsWithSplitIds<PulsarMessage<OUT>> fetch() throws IOException {
    RecordsBySplits.Builder<PulsarMessage<OUT>> builder = new RecordsBySplits.Builder<>();
    // Return early when no split is registered to this reader.
    if (pulsarConsumer == null || registeredSplit == null) {
        return builder.build();
    }
    // Reset the wakeup flag to false before starting to consume.
    wakeup.compareAndSet(true, false);
    StopCursor stopCursor = registeredSplit.getStopCursor();
    String splitId = registeredSplit.splitId();
    PulsarMessageCollector<OUT> collector = new PulsarMessageCollector<>(splitId, builder);
    Deadline deadline = Deadline.fromNow(sourceConfiguration.getMaxFetchTime());
    // Consume messages from Pulsar until the reader is woken up, the record limit is reached, or the fetch deadline expires.
    for (int messageNum = 0; messageNum < sourceConfiguration.getMaxFetchRecords() && deadline.hasTimeLeft() && isNotWakeup(); messageNum++) {
        try {
            Duration timeout = deadline.timeLeftIfAny();
            Message<byte[]> message = pollMessage(timeout);
            if (message == null) {
                break;
            }
            // Deserialize message.
            collector.setMessage(message);
            deserializationSchema.deserialize(message, collector);
            // Acknowledge the message if needed.
            finishedPollMessage(message);
            if (stopCursor.shouldStop(message)) {
                builder.addFinishedSplit(splitId);
                break;
            }
        } catch (InterruptedException e) {
            Thread.currentThread().interrupt();
            break;
        } catch (TimeoutException e) {
            break;
        } catch (ExecutionException e) {
            LOG.error("Error in polling message from pulsar consumer.", e);
            break;
        } catch (Exception e) {
            throw new IOException(e);
        }
    }
    return builder.build();
}
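The Deadline usage in this reader boils down to one idea: take a single time budget for the whole fetch and derive each poll's timeout from whatever remains, so a slow broker cannot stall the reader past its fetch window. The following is a minimal, hypothetical sketch of just that pattern; pollOnce stands in for the real pollMessage call and all Pulsar plumbing is omitted.

import java.time.Duration;
import java.util.concurrent.TimeoutException;

import org.apache.flink.api.common.time.Deadline;

class BoundedFetchSketch {

    // Poll a source repeatedly, but never exceed the overall fetch budget.
    static int fetchBounded(Duration maxFetchTime, int maxRecords) {
        Deadline deadline = Deadline.fromNow(maxFetchTime);
        int fetched = 0;
        while (fetched < maxRecords && deadline.hasTimeLeft()) {
            try {
                // timeLeftIfAny() throws TimeoutException once the budget is spent,
                // so each poll only gets the remaining share of the budget.
                Duration pollTimeout = deadline.timeLeftIfAny();
                Object polled = pollOnce(pollTimeout);
                if (polled == null) {
                    break;
                }
                fetched++;
            } catch (TimeoutException e) {
                break;
            }
        }
        return fetched;
    }

    // Hypothetical stand-in for pollMessage(timeout), e.g. consumer.receive(...).
    private static Object pollOnce(Duration timeout) {
        return null;
    }
}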

Example 57 with Deadline

Use of org.apache.flink.api.common.time.Deadline in project flink by apache.

Class JMXJobManagerMetricTest, method testJobManagerJMXMetricAccess.

/**
 * Tests that metrics registered on the JobManager are actually accessible via JMX.
 */
@Test
void testJobManagerJMXMetricAccess(@InjectClusterClient ClusterClient<?> client) throws Exception {
    Deadline deadline = Deadline.now().plus(Duration.ofMinutes(2));
    try {
        JobVertex sourceJobVertex = new JobVertex("Source");
        sourceJobVertex.setInvokableClass(BlockingInvokable.class);
        sourceJobVertex.setParallelism(1);
        final JobCheckpointingSettings jobCheckpointingSettings =
                new JobCheckpointingSettings(
                        new CheckpointCoordinatorConfiguration(
                                500, 500, 50, 5,
                                CheckpointRetentionPolicy.NEVER_RETAIN_AFTER_TERMINATION,
                                true, false, 0, 0),
                        null);
        final JobGraph jobGraph =
                JobGraphBuilder.newStreamingJobGraphBuilder()
                        .setJobName("TestingJob")
                        .addJobVertex(sourceJobVertex)
                        .setJobCheckpointingSettings(jobCheckpointingSettings)
                        .build();
        client.submitJob(jobGraph).get();
        FutureUtils.retrySuccessfulWithDelay(
                        () -> client.getJobStatus(jobGraph.getJobID()),
                        Time.milliseconds(10),
                        deadline,
                        status -> status == JobStatus.RUNNING,
                        TestingUtils.defaultScheduledExecutor())
                .get(deadline.timeLeft().toMillis(), TimeUnit.MILLISECONDS);
        MBeanServer mBeanServer = ManagementFactory.getPlatformMBeanServer();
        Set<ObjectName> nameSet = mBeanServer.queryNames(new ObjectName("org.apache.flink.jobmanager.job.lastCheckpointSize:job_name=TestingJob,*"), null);
        assertThat(nameSet).hasSize(1);
        assertThat(mBeanServer.getAttribute(nameSet.iterator().next(), "Value")).isEqualTo(-1L);
        BlockingInvokable.unblock();
    } finally {
        BlockingInvokable.unblock();
    }
}
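A detail worth calling out: the test creates the Deadline once with Deadline.now().plus(...) and then reuses deadline.timeLeft() for the blocking get, so the retry loop and the final wait share one two-minute budget rather than each getting its own. A minimal sketch of that budgeting idea, assuming nothing beyond two arbitrary futures:

import java.time.Duration;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.TimeUnit;

import org.apache.flink.api.common.time.Deadline;

class SharedBudgetSketch {

    // Both waits draw from the same two-minute budget; the second wait only
    // gets whatever the first one left over.
    static void awaitBoth(CompletableFuture<?> first, CompletableFuture<?> second) throws Exception {
        Deadline deadline = Deadline.now().plus(Duration.ofMinutes(2));
        first.get(deadline.timeLeft().toMillis(), TimeUnit.MILLISECONDS);
        second.get(deadline.timeLeft().toMillis(), TimeUnit.MILLISECONDS);
    }
}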

Example 58 with Deadline

Use of org.apache.flink.api.common.time.Deadline in project beam by apache.

Class FlinkPortableClientEntryPoint, method startJobService.

private void startJobService() throws Exception {
    jobInvokerFactory = new DetachedJobInvokerFactory();
    jobServer = FlinkJobServerDriver.fromConfig(FlinkJobServerDriver.parseArgs(new String[] { "--job-port=" + jobPort, "--artifact-port=0", "--expansion-port=0" }), jobInvokerFactory);
    jobServerThread = new Thread(jobServer);
    jobServerThread.start();
    Deadline deadline = Deadline.fromNow(JOB_SERVICE_STARTUP_TIMEOUT);
    while (jobServer.getJobServerUrl() == null && deadline.hasTimeLeft()) {
        try {
            Thread.sleep(500);
        } catch (InterruptedException interruptEx) {
            Thread.currentThread().interrupt();
            throw new RuntimeException(interruptEx);
        }
    }
    if (!jobServerThread.isAlive()) {
        throw new IllegalStateException("Job service thread is not alive");
    }
    if (jobServer.getJobServerUrl() == null) {
        String msg = String.format("Timeout of %s waiting for job service to start.", deadline);
        throw new TimeoutException(msg);
    }
}
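This is the simplest Deadline idiom in the set: sleep-poll until a condition holds or the budget runs out, then decide what to report. A compact, hypothetical variant of the same loop, with isReady standing in for the job-server URL check:

import java.time.Duration;
import java.util.concurrent.TimeoutException;
import java.util.function.Supplier;

import org.apache.flink.api.common.time.Deadline;

class StartupWaitSketch {

    // Sleep-poll until the condition holds or the deadline expires.
    static void waitUntilReady(Supplier<Boolean> isReady, Duration timeout)
            throws InterruptedException, TimeoutException {
        Deadline deadline = Deadline.fromNow(timeout);
        while (!isReady.get() && deadline.hasTimeLeft()) {
            Thread.sleep(500);
        }
        if (!isReady.get()) {
            throw new TimeoutException("Condition not met within " + timeout);
        }
    }
}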

Example 59 with Deadline

Use of org.apache.flink.api.common.time.Deadline in project flink by apache.

Class JobMasterTest, method waitUntilAllExecutionsAreScheduledOrDeployed.

private void waitUntilAllExecutionsAreScheduledOrDeployed(final JobMasterGateway jobMasterGateway) throws Exception {
    final Duration duration = Duration.ofMillis(testingTimeout.toMilliseconds());
    final Deadline deadline = Deadline.fromNow(duration);
    CommonTestUtils.waitUntilCondition(() -> {
        final Collection<AccessExecution> executions = getExecutions(jobMasterGateway);
        return !executions.isEmpty() && executions.stream().allMatch(execution -> execution.getState() == ExecutionState.SCHEDULED || execution.getState() == ExecutionState.DEPLOYING);
    }, deadline);
}
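The only Deadline-specific step here is converting the test's Flink Time timeout into a Deadline before handing it to CommonTestUtils.waitUntilCondition; the polling itself is delegated. A small sketch of that conversion, assuming a timeout of type org.apache.flink.api.common.time.Time:

import java.time.Duration;

import org.apache.flink.api.common.time.Deadline;
import org.apache.flink.api.common.time.Time;

class TimeToDeadlineSketch {

    // Time has no direct Deadline conversion, so go through java.time.Duration.
    static Deadline toDeadline(Time timeout) {
        return Deadline.fromNow(Duration.ofMillis(timeout.toMilliseconds()));
    }
}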

Example 60 with Deadline

Use of org.apache.flink.api.common.time.Deadline in project flink by apache.

Class MiniClusterResource, method cancelAllJobs.

private void cancelAllJobs(boolean waitUntilSlotsAreFreed) {
    try {
        final long shutdownTimeout = miniClusterResourceConfiguration.getShutdownTimeout().toMilliseconds();
        final Deadline jobCancellationDeadline = Deadline.fromNow(Duration.ofMillis(shutdownTimeout));
        final List<CompletableFuture<Acknowledge>> jobCancellationFutures =
                miniCluster
                        .listJobs()
                        .get(jobCancellationDeadline.timeLeft().toMillis(), TimeUnit.MILLISECONDS)
                        .stream()
                        .filter(status -> !status.getJobState().isGloballyTerminalState())
                        .map(status -> miniCluster.cancelJob(status.getJobId()))
                        .collect(Collectors.toList());
        FutureUtils.waitForAll(jobCancellationFutures)
                .get(jobCancellationDeadline.timeLeft().toMillis(), TimeUnit.MILLISECONDS);
        CommonTestUtils.waitUntilCondition(() -> {
            final long unfinishedJobs =
                    miniCluster
                            .listJobs()
                            .get(jobCancellationDeadline.timeLeft().toMillis(), TimeUnit.MILLISECONDS)
                            .stream()
                            .filter(status -> !status.getJobState().isGloballyTerminalState())
                            .count();
            return unfinishedJobs == 0;
        }, jobCancellationDeadline);
        if (waitUntilSlotsAreFreed) {
            CommonTestUtils.waitUntilCondition(() -> {
                final ResourceOverview resourceOverview = miniCluster.getResourceOverview().get();
                return resourceOverview.getNumberRegisteredSlots() == resourceOverview.getNumberFreeSlots();
            }, jobCancellationDeadline);
        }
    } catch (Exception e) {
        log.warn("Exception while shutting down remaining jobs.", e);
    }
}
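As in the JMX test above, a single Deadline is threaded through every blocking call so listing, cancelling, and waiting all share one shutdown budget. A sketch of that wrapper idea, with getWithin as a hypothetical helper name:

import java.util.concurrent.CompletableFuture;
import java.util.concurrent.TimeUnit;

import org.apache.flink.api.common.time.Deadline;

class DeadlineGetSketch {

    // Block on a future, but only for whatever remains of the shared deadline.
    static <T> T getWithin(CompletableFuture<T> future, Deadline deadline) throws Exception {
        return future.get(deadline.timeLeft().toMillis(), TimeUnit.MILLISECONDS);
    }
}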

Aggregations

Deadline (org.apache.flink.api.common.time.Deadline): 75
Test (org.junit.Test): 34
JobID (org.apache.flink.api.common.JobID): 29
JobGraph (org.apache.flink.runtime.jobgraph.JobGraph): 26
Duration (java.time.Duration): 19
Configuration (org.apache.flink.configuration.Configuration): 15
StreamExecutionEnvironment (org.apache.flink.streaming.api.environment.StreamExecutionEnvironment): 14
Tuple2 (org.apache.flink.api.java.tuple.Tuple2): 13
IOException (java.io.IOException): 12
ExecutionException (java.util.concurrent.ExecutionException): 12
KeySelector (org.apache.flink.api.java.functions.KeySelector): 12
AtomicLong (java.util.concurrent.atomic.AtomicLong): 11
MiniCluster (org.apache.flink.runtime.minicluster.MiniCluster): 10
File (java.io.File): 9
TimeUnit (java.util.concurrent.TimeUnit): 9
JobStatus (org.apache.flink.api.common.JobStatus): 9
List (java.util.List): 8
Test (org.junit.jupiter.api.Test): 8
CompletableFuture (java.util.concurrent.CompletableFuture): 7
CountDownLatch (java.util.concurrent.CountDownLatch): 7