
Example 46 with UnregisteredMetricsGroup

Use of org.apache.flink.metrics.groups.UnregisteredMetricsGroup in project flink by apache.

From class CheckpointStatsTrackerTest, method testCreateSnapshot.

/**
 * Tests that a new stats snapshot is only created when new checkpoint stats have been
 * reported or updated since the last snapshot; otherwise the previous snapshot is reused.
 */
@Test
public void testCreateSnapshot() throws Exception {
    JobVertexID jobVertexID = new JobVertexID();
    CheckpointStatsTracker tracker = new CheckpointStatsTracker(10, new UnregisteredMetricsGroup());
    CheckpointStatsSnapshot snapshot1 = tracker.createSnapshot();
    // Pending checkpoint => new snapshot
    PendingCheckpointStats pending = tracker.reportPendingCheckpoint(0, 1, CheckpointProperties.forCheckpoint(CheckpointRetentionPolicy.NEVER_RETAIN_AFTER_TERMINATION), singletonMap(jobVertexID, 1));
    pending.reportSubtaskStats(jobVertexID, createSubtaskStats(0));
    CheckpointStatsSnapshot snapshot2 = tracker.createSnapshot();
    assertNotEquals(snapshot1, snapshot2);
    assertEquals(snapshot2, tracker.createSnapshot());
    // Complete checkpoint => new snapshot
    pending.reportCompletedCheckpoint(null);
    CheckpointStatsSnapshot snapshot3 = tracker.createSnapshot();
    assertNotEquals(snapshot2, snapshot3);
    // Restore operation => new snapshot
    tracker.reportRestoredCheckpoint(new RestoredCheckpointStats(12, CheckpointProperties.forCheckpoint(CheckpointRetentionPolicy.NEVER_RETAIN_AFTER_TERMINATION), 12, null));
    CheckpointStatsSnapshot snapshot4 = tracker.createSnapshot();
    assertNotEquals(snapshot3, snapshot4);
    assertEquals(snapshot4, tracker.createSnapshot());
}
Also used : UnregisteredMetricsGroup(org.apache.flink.metrics.groups.UnregisteredMetricsGroup) JobVertexID(org.apache.flink.runtime.jobgraph.JobVertexID) Test(org.junit.Test)
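
Example 46 calls a createSubtaskStats(int) helper that is not included in this excerpt. A minimal hypothetical sketch of such a helper, simply mirroring the SubtaskStateStats constructor call shown in Example 47, could look like this:

// Hypothetical helper, not the actual one from CheckpointStatsTrackerTest.
// The argument order mirrors the SubtaskStateStats constructor call in Example 47;
// all size/duration fields are zeroed because this test only needs a subtask ack to be reported.
private static SubtaskStateStats createSubtaskStats(int subtaskIndex) {
    long ackTimestamp = System.currentTimeMillis();
    long ignored = 0;
    return new SubtaskStateStats(
            subtaskIndex, ackTimestamp, ignored, ignored, ignored, ignored,
            ignored, ignored, ignored, ignored, false, true);
}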

Example 47 with UnregisteredMetricsGroup

Use of org.apache.flink.metrics.groups.UnregisteredMetricsGroup in project flink by apache.

From class CheckpointStatsTrackerTest, method testMetricsAreUpdated.

/**
 * Tests that the metrics are updated properly. We had a bug that required new stats snapshots
 * in order to update the metrics.
 */
@Test
@SuppressWarnings("unchecked")
public void testMetricsAreUpdated() throws Exception {
    final Map<String, Gauge<?>> registeredGauges = new HashMap<>();
    MetricGroup metricGroup = new UnregisteredMetricsGroup() {

        @Override
        public <T, G extends Gauge<T>> G gauge(String name, G gauge) {
            registeredGauges.put(name, gauge);
            return gauge;
        }
    };
    JobVertexID jobVertexID = new JobVertexID();
    ExecutionGraph graph = new CheckpointCoordinatorTestingUtils.CheckpointExecutionGraphBuilder().addJobVertex(jobVertexID).build();
    ExecutionJobVertex jobVertex = graph.getJobVertex(jobVertexID);
    CheckpointStatsTracker stats = new CheckpointStatsTracker(0, metricGroup);
    // Make sure to adjust this test if metrics are added/removed
    assertEquals(10, registeredGauges.size());
    // Check initial values
    Gauge<Long> numCheckpoints = (Gauge<Long>) registeredGauges.get(CheckpointStatsTracker.NUMBER_OF_CHECKPOINTS_METRIC);
    Gauge<Integer> numInProgressCheckpoints = (Gauge<Integer>) registeredGauges.get(CheckpointStatsTracker.NUMBER_OF_IN_PROGRESS_CHECKPOINTS_METRIC);
    Gauge<Long> numCompletedCheckpoints = (Gauge<Long>) registeredGauges.get(CheckpointStatsTracker.NUMBER_OF_COMPLETED_CHECKPOINTS_METRIC);
    Gauge<Long> numFailedCheckpoints = (Gauge<Long>) registeredGauges.get(CheckpointStatsTracker.NUMBER_OF_FAILED_CHECKPOINTS_METRIC);
    Gauge<Long> latestRestoreTimestamp = (Gauge<Long>) registeredGauges.get(CheckpointStatsTracker.LATEST_RESTORED_CHECKPOINT_TIMESTAMP_METRIC);
    Gauge<Long> latestCompletedSize = (Gauge<Long>) registeredGauges.get(CheckpointStatsTracker.LATEST_COMPLETED_CHECKPOINT_SIZE_METRIC);
    Gauge<Long> latestCompletedDuration = (Gauge<Long>) registeredGauges.get(CheckpointStatsTracker.LATEST_COMPLETED_CHECKPOINT_DURATION_METRIC);
    Gauge<Long> latestProcessedData = (Gauge<Long>) registeredGauges.get(CheckpointStatsTracker.LATEST_COMPLETED_CHECKPOINT_PROCESSED_DATA_METRIC);
    Gauge<Long> latestPersistedData = (Gauge<Long>) registeredGauges.get(CheckpointStatsTracker.LATEST_COMPLETED_CHECKPOINT_PERSISTED_DATA_METRIC);
    Gauge<String> latestCompletedExternalPath = (Gauge<String>) registeredGauges.get(CheckpointStatsTracker.LATEST_COMPLETED_CHECKPOINT_EXTERNAL_PATH_METRIC);
    assertEquals(Long.valueOf(0), numCheckpoints.getValue());
    assertEquals(Integer.valueOf(0), numInProgressCheckpoints.getValue());
    assertEquals(Long.valueOf(0), numCompletedCheckpoints.getValue());
    assertEquals(Long.valueOf(0), numFailedCheckpoints.getValue());
    assertEquals(Long.valueOf(-1), latestRestoreTimestamp.getValue());
    assertEquals(Long.valueOf(-1), latestCompletedSize.getValue());
    assertEquals(Long.valueOf(-1), latestCompletedDuration.getValue());
    assertEquals(Long.valueOf(-1), latestProcessedData.getValue());
    assertEquals(Long.valueOf(-1), latestPersistedData.getValue());
    assertEquals("n/a", latestCompletedExternalPath.getValue());
    PendingCheckpointStats pending = stats.reportPendingCheckpoint(0, 0, CheckpointProperties.forCheckpoint(CheckpointRetentionPolicy.NEVER_RETAIN_AFTER_TERMINATION), singletonMap(jobVertexID, 1));
    // Check counts
    assertEquals(Long.valueOf(1), numCheckpoints.getValue());
    assertEquals(Integer.valueOf(1), numInProgressCheckpoints.getValue());
    assertEquals(Long.valueOf(0), numCompletedCheckpoints.getValue());
    assertEquals(Long.valueOf(0), numFailedCheckpoints.getValue());
    long ackTimestamp = 11231230L;
    long stateSize = 12381238L;
    long processedData = 4242L;
    long persistedData = 4444L;
    long ignored = 0;
    String externalPath = "myexternalpath";
    SubtaskStateStats subtaskStats = new SubtaskStateStats(0, ackTimestamp, stateSize, stateSize, ignored, ignored, processedData, persistedData, ignored, ignored, false, true);
    assertTrue(pending.reportSubtaskStats(jobVertexID, subtaskStats));
    pending.reportCompletedCheckpoint(externalPath);
    // Verify completed checkpoint updated
    assertEquals(Long.valueOf(1), numCheckpoints.getValue());
    assertEquals(Integer.valueOf(0), numInProgressCheckpoints.getValue());
    assertEquals(Long.valueOf(1), numCompletedCheckpoints.getValue());
    assertEquals(Long.valueOf(0), numFailedCheckpoints.getValue());
    assertEquals(Long.valueOf(-1), latestRestoreTimestamp.getValue());
    assertEquals(Long.valueOf(stateSize), latestCompletedSize.getValue());
    assertEquals(Long.valueOf(processedData), latestProcessedData.getValue());
    assertEquals(Long.valueOf(persistedData), latestPersistedData.getValue());
    // The pending checkpoint was triggered at timestamp 0, so the completed duration equals the ack timestamp
    assertEquals(Long.valueOf(ackTimestamp), latestCompletedDuration.getValue());
    assertEquals(externalPath, latestCompletedExternalPath.getValue());
    // Check failed
    PendingCheckpointStats nextPending = stats.reportPendingCheckpoint(1, 11, CheckpointProperties.forCheckpoint(CheckpointRetentionPolicy.NEVER_RETAIN_AFTER_TERMINATION), singletonMap(jobVertexID, 1));
    long failureTimestamp = 1230123L;
    nextPending.reportFailedCheckpoint(failureTimestamp, null);
    // Verify updated
    assertEquals(Long.valueOf(2), numCheckpoints.getValue());
    assertEquals(Integer.valueOf(0), numInProgressCheckpoints.getValue());
    assertEquals(Long.valueOf(1), numCompletedCheckpoints.getValue());
    // one failed now
    assertEquals(Long.valueOf(1), numFailedCheckpoints.getValue());
    // Check restore
    long restoreTimestamp = 183419283L;
    RestoredCheckpointStats restored = new RestoredCheckpointStats(1, CheckpointProperties.forCheckpoint(CheckpointRetentionPolicy.NEVER_RETAIN_AFTER_TERMINATION), restoreTimestamp, null);
    stats.reportRestoredCheckpoint(restored);
    assertEquals(Long.valueOf(2), numCheckpoints.getValue());
    assertEquals(Integer.valueOf(0), numInProgressCheckpoints.getValue());
    assertEquals(Long.valueOf(1), numCompletedCheckpoints.getValue());
    assertEquals(Long.valueOf(1), numFailedCheckpoints.getValue());
    assertEquals(Long.valueOf(restoreTimestamp), latestRestoreTimestamp.getValue());
    // Check Internal Checkpoint Configuration
    PendingCheckpointStats thirdPending = stats.reportPendingCheckpoint(2, 5000, CheckpointProperties.forCheckpoint(CheckpointRetentionPolicy.NEVER_RETAIN_AFTER_TERMINATION), singletonMap(jobVertexID, 1));
    thirdPending.reportSubtaskStats(jobVertexID, subtaskStats);
    thirdPending.reportCompletedCheckpoint(null);
    // Verify the external path is "n/a", because an internal checkpoint does not produce an external path.
    assertEquals("n/a", latestCompletedExternalPath.getValue());
}
Also used : UnregisteredMetricsGroup(org.apache.flink.metrics.groups.UnregisteredMetricsGroup) HashMap(java.util.HashMap) JobVertexID(org.apache.flink.runtime.jobgraph.JobVertexID) MetricGroup(org.apache.flink.metrics.MetricGroup) Gauge(org.apache.flink.metrics.Gauge) ExecutionJobVertex(org.apache.flink.runtime.executiongraph.ExecutionJobVertex) ExecutionGraph(org.apache.flink.runtime.executiongraph.ExecutionGraph) Test(org.junit.Test)
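
The core trick in this example, overriding gauge(...) on an otherwise no-op UnregisteredMetricsGroup to capture what a component registers, is reusable beyond this single test. A minimal sketch of the pattern (the class and method names here are illustrative, not from the Flink sources):

import java.util.HashMap;
import java.util.Map;

import org.apache.flink.metrics.Gauge;
import org.apache.flink.metrics.groups.UnregisteredMetricsGroup;

/** Illustrative test helper: records every gauge registered against it. */
class GaugeCapturingMetricsGroup extends UnregisteredMetricsGroup {

    private final Map<String, Gauge<?>> gauges = new HashMap<>();

    @Override
    public <T, G extends Gauge<T>> G gauge(String name, G gauge) {
        gauges.put(name, gauge);
        return gauge;
    }

    Map<String, Gauge<?>> capturedGauges() {
        return gauges;
    }
}

A test would pass an instance of this group to the component under test and then assert on capturedGauges(), exactly as the example above does with its anonymous subclass.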

Example 48 with UnregisteredMetricsGroup

Use of org.apache.flink.metrics.groups.UnregisteredMetricsGroup in project flink by apache.

From class StreamTaskStateInitializerImplTest, method testWithRestore.

@SuppressWarnings("unchecked")
@Test
public void testWithRestore() throws Exception {
    StateBackend mockingBackend = spy(new StateBackend() {

        @Override
        public <K> AbstractKeyedStateBackend<K> createKeyedStateBackend(Environment env, JobID jobID, String operatorIdentifier, TypeSerializer<K> keySerializer, int numberOfKeyGroups, KeyGroupRange keyGroupRange, TaskKvStateRegistry kvStateRegistry, TtlTimeProvider ttlTimeProvider, MetricGroup metricGroup, @Nonnull Collection<KeyedStateHandle> stateHandles, CloseableRegistry cancelStreamRegistry) throws Exception {
            return mock(AbstractKeyedStateBackend.class);
        }

        @Override
        public OperatorStateBackend createOperatorStateBackend(Environment env, String operatorIdentifier, @Nonnull Collection<OperatorStateHandle> stateHandles, CloseableRegistry cancelStreamRegistry) throws Exception {
            return mock(OperatorStateBackend.class);
        }
    });
    OperatorID operatorID = new OperatorID(47L, 11L);
    TaskStateSnapshot taskStateSnapshot = new TaskStateSnapshot();
    Random random = new Random(0x42);
    OperatorSubtaskState operatorSubtaskState = OperatorSubtaskState.builder()
            .setManagedOperatorState(new OperatorStreamStateHandle(
                    Collections.singletonMap("a", new OperatorStateHandle.StateMetaInfo(new long[] { 0, 10 }, SPLIT_DISTRIBUTE)),
                    CheckpointTestUtils.createDummyStreamStateHandle(random, null)))
            .setRawOperatorState(new OperatorStreamStateHandle(
                    Collections.singletonMap("_default_", new OperatorStateHandle.StateMetaInfo(new long[] { 0, 20, 30 }, SPLIT_DISTRIBUTE)),
                    CheckpointTestUtils.createDummyStreamStateHandle(random, null)))
            .setManagedKeyedState(CheckpointTestUtils.createDummyKeyGroupStateHandle(random, null))
            .setRawKeyedState(CheckpointTestUtils.createDummyKeyGroupStateHandle(random, null))
            .setInputChannelState(singleton(createNewInputChannelStateHandle(10, random)))
            .setResultSubpartitionState(singleton(createNewResultSubpartitionStateHandle(10, random)))
            .build();
    taskStateSnapshot.putSubtaskStateByOperatorID(operatorID, operatorSubtaskState);
    JobManagerTaskRestore jobManagerTaskRestore = new JobManagerTaskRestore(42L, taskStateSnapshot);
    StreamTaskStateInitializer streamTaskStateManager = streamTaskStateManager(mockingBackend, jobManagerTaskRestore, false);
    AbstractStreamOperator<?> streamOperator = mock(AbstractStreamOperator.class);
    when(streamOperator.getOperatorID()).thenReturn(operatorID);
    TypeSerializer<?> typeSerializer = new IntSerializer();
    CloseableRegistry closeableRegistry = new CloseableRegistry();
    StreamOperatorStateContext stateContext = streamTaskStateManager.streamOperatorStateContext(streamOperator.getOperatorID(), streamOperator.getClass().getSimpleName(), new TestProcessingTimeService(), streamOperator, typeSerializer, closeableRegistry, new UnregisteredMetricsGroup(), 1.0, false);
    OperatorStateBackend operatorStateBackend = stateContext.operatorStateBackend();
    CheckpointableKeyedStateBackend<?> keyedStateBackend = stateContext.keyedStateBackend();
    InternalTimeServiceManager<?> timeServiceManager = stateContext.internalTimerServiceManager();
    CloseableIterable<KeyGroupStatePartitionStreamProvider> keyedStateInputs = stateContext.rawKeyedStateInputs();
    CloseableIterable<StatePartitionStreamProvider> operatorStateInputs = stateContext.rawOperatorStateInputs();
    Assert.assertTrue("Expected the context to be restored", stateContext.isRestored());
    Assert.assertEquals(OptionalLong.of(42L), stateContext.getRestoredCheckpointId());
    Assert.assertNotNull(operatorStateBackend);
    Assert.assertNotNull(keyedStateBackend);
    // the time service manager is deactivated on purpose so that it does not attempt to
    // consume the raw keyed state
    Assert.assertNull(timeServiceManager);
    Assert.assertNotNull(keyedStateInputs);
    Assert.assertNotNull(operatorStateInputs);
    int count = 0;
    for (KeyGroupStatePartitionStreamProvider keyedStateInput : keyedStateInputs) {
        ++count;
    }
    Assert.assertEquals(1, count);
    count = 0;
    for (StatePartitionStreamProvider operatorStateInput : operatorStateInputs) {
        ++count;
    }
    Assert.assertEquals(3, count);
    checkCloseablesRegistered(closeableRegistry, operatorStateBackend, keyedStateBackend, keyedStateInputs, operatorStateInputs);
}
Also used : IntSerializer(org.apache.flink.api.common.typeutils.base.IntSerializer) UnregisteredMetricsGroup(org.apache.flink.metrics.groups.UnregisteredMetricsGroup) KeyGroupRange(org.apache.flink.runtime.state.KeyGroupRange) MetricGroup(org.apache.flink.metrics.MetricGroup) JobManagerTaskRestore(org.apache.flink.runtime.checkpoint.JobManagerTaskRestore) TaskKvStateRegistry(org.apache.flink.runtime.query.TaskKvStateRegistry) OperatorID(org.apache.flink.runtime.jobgraph.OperatorID) KeyedStateHandle(org.apache.flink.runtime.state.KeyedStateHandle) CloseableRegistry(org.apache.flink.core.fs.CloseableRegistry) StateBackend(org.apache.flink.runtime.state.StateBackend) OperatorStateBackend(org.apache.flink.runtime.state.OperatorStateBackend) MemoryStateBackend(org.apache.flink.runtime.state.memory.MemoryStateBackend) AbstractKeyedStateBackend(org.apache.flink.runtime.state.AbstractKeyedStateBackend) CheckpointableKeyedStateBackend(org.apache.flink.runtime.state.CheckpointableKeyedStateBackend) OperatorSubtaskState(org.apache.flink.runtime.checkpoint.OperatorSubtaskState) KeyGroupStatePartitionStreamProvider(org.apache.flink.runtime.state.KeyGroupStatePartitionStreamProvider) StatePartitionStreamProvider(org.apache.flink.runtime.state.StatePartitionStreamProvider) TaskStateSnapshot(org.apache.flink.runtime.checkpoint.TaskStateSnapshot) Random(java.util.Random) TtlTimeProvider(org.apache.flink.runtime.state.ttl.TtlTimeProvider) OperatorStreamStateHandle(org.apache.flink.runtime.state.OperatorStreamStateHandle) DummyEnvironment(org.apache.flink.runtime.operators.testutils.DummyEnvironment) Environment(org.apache.flink.runtime.execution.Environment) TestProcessingTimeService(org.apache.flink.streaming.runtime.tasks.TestProcessingTimeService) OperatorStateHandle(org.apache.flink.runtime.state.OperatorStateHandle) JobID(org.apache.flink.api.common.JobID) TaskStateManagerImplTest(org.apache.flink.runtime.state.TaskStateManagerImplTest) Test(org.junit.Test)
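
The checkCloseablesRegistered helper invoked at the end of this test (and of Example 49) is not included in the excerpt. A hypothetical sketch of what it might verify, assuming CloseableRegistry.unregisterCloseable returns true only for closeables that were previously registered:

// Hypothetical sketch only, not the actual helper from StreamTaskStateInitializerImplTest.
// It checks that every java.io.Closeable produced by the state context was registered
// with the given CloseableRegistry.
private static void checkCloseablesRegistered(CloseableRegistry registry, Closeable... closeables) {
    for (Closeable closeable : closeables) {
        Assert.assertTrue(closeable + " was not registered", registry.unregisterCloseable(closeable));
    }
}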

Example 49 with UnregisteredMetricsGroup

Use of org.apache.flink.metrics.groups.UnregisteredMetricsGroup in project flink by apache.

From class StreamTaskStateInitializerImplTest, method testNoRestore.

@Test
public void testNoRestore() throws Exception {
    MemoryStateBackend stateBackend = spy(new MemoryStateBackend(1024));
    // No job manager provided state to restore
    StreamTaskStateInitializer streamTaskStateManager = streamTaskStateManager(stateBackend, null, true);
    OperatorID operatorID = new OperatorID(47L, 11L);
    AbstractStreamOperator<?> streamOperator = mock(AbstractStreamOperator.class);
    when(streamOperator.getOperatorID()).thenReturn(operatorID);
    TypeSerializer<?> typeSerializer = new IntSerializer();
    CloseableRegistry closeableRegistry = new CloseableRegistry();
    StreamOperatorStateContext stateContext = streamTaskStateManager.streamOperatorStateContext(streamOperator.getOperatorID(), streamOperator.getClass().getSimpleName(), new TestProcessingTimeService(), streamOperator, typeSerializer, closeableRegistry, new UnregisteredMetricsGroup(), 1.0, false);
    OperatorStateBackend operatorStateBackend = stateContext.operatorStateBackend();
    CheckpointableKeyedStateBackend<?> keyedStateBackend = stateContext.keyedStateBackend();
    InternalTimeServiceManager<?> timeServiceManager = stateContext.internalTimerServiceManager();
    CloseableIterable<KeyGroupStatePartitionStreamProvider> keyedStateInputs = stateContext.rawKeyedStateInputs();
    CloseableIterable<StatePartitionStreamProvider> operatorStateInputs = stateContext.rawOperatorStateInputs();
    Assert.assertFalse("Expected the context to NOT be restored", stateContext.isRestored());
    Assert.assertNotNull(operatorStateBackend);
    Assert.assertNotNull(keyedStateBackend);
    Assert.assertNotNull(timeServiceManager);
    Assert.assertNotNull(keyedStateInputs);
    Assert.assertNotNull(operatorStateInputs);
    checkCloseablesRegistered(closeableRegistry, operatorStateBackend, keyedStateBackend, keyedStateInputs, operatorStateInputs);
    Assert.assertFalse(keyedStateInputs.iterator().hasNext());
    Assert.assertFalse(operatorStateInputs.iterator().hasNext());
}
Also used : IntSerializer(org.apache.flink.api.common.typeutils.base.IntSerializer) UnregisteredMetricsGroup(org.apache.flink.metrics.groups.UnregisteredMetricsGroup) MemoryStateBackend(org.apache.flink.runtime.state.memory.MemoryStateBackend) OperatorID(org.apache.flink.runtime.jobgraph.OperatorID) CloseableRegistry(org.apache.flink.core.fs.CloseableRegistry) KeyGroupStatePartitionStreamProvider(org.apache.flink.runtime.state.KeyGroupStatePartitionStreamProvider) StatePartitionStreamProvider(org.apache.flink.runtime.state.StatePartitionStreamProvider) OperatorStateBackend(org.apache.flink.runtime.state.OperatorStateBackend) TestProcessingTimeService(org.apache.flink.streaming.runtime.tasks.TestProcessingTimeService) TaskStateManagerImplTest(org.apache.flink.runtime.state.TaskStateManagerImplTest) Test(org.junit.Test)

Example 50 with UnregisteredMetricsGroup

Use of org.apache.flink.metrics.groups.UnregisteredMetricsGroup in project flink by apache.

From class StreamingRuntimeContextTest, method createMapPlainMockOp.

@SuppressWarnings("unchecked")
private static AbstractStreamOperator<?> createMapPlainMockOp() throws Exception {
    AbstractStreamOperator<?> operatorMock = mock(AbstractStreamOperator.class);
    ExecutionConfig config = new ExecutionConfig();
    KeyedStateBackend keyedStateBackend = mock(KeyedStateBackend.class);
    DefaultKeyedStateStore keyedStateStore = new DefaultKeyedStateStore(keyedStateBackend, config);
    when(operatorMock.getExecutionConfig()).thenReturn(config);
    doAnswer(new Answer<MapState<Integer, String>>() {

        @Override
        public MapState<Integer, String> answer(InvocationOnMock invocationOnMock) throws Throwable {
            MapStateDescriptor<Integer, String> descr = (MapStateDescriptor<Integer, String>) invocationOnMock.getArguments()[2];
            AbstractKeyedStateBackend<Integer> backend = new MemoryStateBackend().createKeyedStateBackend(
                    new DummyEnvironment("test_task", 1, 0),
                    new JobID(),
                    "test_op",
                    IntSerializer.INSTANCE,
                    1,
                    new KeyGroupRange(0, 0),
                    new KvStateRegistry().createTaskRegistry(new JobID(), new JobVertexID()),
                    TtlTimeProvider.DEFAULT,
                    new UnregisteredMetricsGroup(),
                    Collections.emptyList(),
                    new CloseableRegistry());
            backend.setCurrentKey(0);
            return backend.getPartitionedState(VoidNamespace.INSTANCE, VoidNamespaceSerializer.INSTANCE, descr);
        }
    }).when(keyedStateBackend).getPartitionedState(Matchers.any(), any(TypeSerializer.class), any(MapStateDescriptor.class));
    when(operatorMock.getKeyedStateStore()).thenReturn(keyedStateStore);
    when(operatorMock.getOperatorID()).thenReturn(new OperatorID());
    when(operatorMock.getProcessingTimeService()).thenReturn(new TestProcessingTimeService());
    return operatorMock;
}
Also used : AbstractKeyedStateBackend(org.apache.flink.runtime.state.AbstractKeyedStateBackend) KeyedStateBackend(org.apache.flink.runtime.state.KeyedStateBackend) KvStateRegistry(org.apache.flink.runtime.query.KvStateRegistry) UnregisteredMetricsGroup(org.apache.flink.metrics.groups.UnregisteredMetricsGroup) MapStateDescriptor(org.apache.flink.api.common.state.MapStateDescriptor) MemoryStateBackend(org.apache.flink.runtime.state.memory.MemoryStateBackend) JobVertexID(org.apache.flink.runtime.jobgraph.JobVertexID) MapState(org.apache.flink.api.common.state.MapState) KeyGroupRange(org.apache.flink.runtime.state.KeyGroupRange) DummyEnvironment(org.apache.flink.runtime.operators.testutils.DummyEnvironment) ExecutionConfig(org.apache.flink.api.common.ExecutionConfig) OperatorID(org.apache.flink.runtime.jobgraph.OperatorID) CloseableRegistry(org.apache.flink.core.fs.CloseableRegistry) InvocationOnMock(org.mockito.invocation.InvocationOnMock) TypeSerializer(org.apache.flink.api.common.typeutils.TypeSerializer) TestProcessingTimeService(org.apache.flink.streaming.runtime.tasks.TestProcessingTimeService) JobID(org.apache.flink.api.common.JobID) DefaultKeyedStateStore(org.apache.flink.runtime.state.DefaultKeyedStateStore)

Aggregations

UnregisteredMetricsGroup (org.apache.flink.metrics.groups.UnregisteredMetricsGroup): 50 usages
Test (org.junit.Test): 28 usages
CloseableRegistry (org.apache.flink.core.fs.CloseableRegistry): 17 usages
JobVertexID (org.apache.flink.runtime.jobgraph.JobVertexID): 15 usages
JobID (org.apache.flink.api.common.JobID): 14 usages
KeyGroupRange (org.apache.flink.runtime.state.KeyGroupRange): 14 usages
HashMap (java.util.HashMap): 11 usages
TestProcessingTimeService (org.apache.flink.streaming.runtime.tasks.TestProcessingTimeService): 10 usages
MetricGroup (org.apache.flink.metrics.MetricGroup): 9 usages
MemoryStateBackend (org.apache.flink.runtime.state.memory.MemoryStateBackend): 9 usages
InvocationOnMock (org.mockito.invocation.InvocationOnMock): 8 usages
ExecutionConfig (org.apache.flink.api.common.ExecutionConfig): 7 usages
Configuration (org.apache.flink.configuration.Configuration): 7 usages
ExecutionJobVertex (org.apache.flink.runtime.executiongraph.ExecutionJobVertex): 7 usages
DummyEnvironment (org.apache.flink.runtime.operators.testutils.DummyEnvironment): 7 usages
PrepareForTest (org.powermock.core.classloader.annotations.PrepareForTest): 7 usages
ArrayList (java.util.ArrayList): 6 usages
List (java.util.List): 6 usages
Properties (java.util.Properties): 6 usages
ExecutionGraph (org.apache.flink.runtime.executiongraph.ExecutionGraph): 6 usages
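
Taken together, the examples share one pattern: wherever a Flink constructor or factory asks for a MetricGroup, a unit test can hand it a fresh UnregisteredMetricsGroup so that no metrics are actually registered anywhere. A minimal sketch of that pattern, reusing the CheckpointStatsTracker constructor from Example 46 (any other component that accepts a MetricGroup would work the same way):

import org.apache.flink.metrics.MetricGroup;
import org.apache.flink.metrics.groups.UnregisteredMetricsGroup;
import org.apache.flink.runtime.checkpoint.CheckpointStatsTracker;

class UnregisteredMetricsGroupSketch {

    // Sketch only: builds a tracker whose metrics go nowhere, as in Example 46.
    static CheckpointStatsTracker newTrackerForTest() {
        MetricGroup noOpMetrics = new UnregisteredMetricsGroup();
        // a history size of 10 mirrors Example 46
        return new CheckpointStatsTracker(10, noOpMetrics);
    }
}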