
Example 1 with TaskStateHandles

use of org.apache.flink.runtime.state.TaskStateHandles in project flink by apache.

the class StreamTaskTest method createTask.

public static Task createTask(Class<? extends AbstractInvokable> invokable, StreamConfig taskConfig, Configuration taskManagerConfig) throws Exception {
    LibraryCacheManager libCache = mock(LibraryCacheManager.class);
    when(libCache.getClassLoader(any(JobID.class))).thenReturn(StreamTaskTest.class.getClassLoader());
    ResultPartitionManager partitionManager = mock(ResultPartitionManager.class);
    ResultPartitionConsumableNotifier consumableNotifier = mock(ResultPartitionConsumableNotifier.class);
    PartitionProducerStateChecker partitionProducerStateChecker = mock(PartitionProducerStateChecker.class);
    Executor executor = mock(Executor.class);
    NetworkEnvironment network = mock(NetworkEnvironment.class);
    when(network.getResultPartitionManager()).thenReturn(partitionManager);
    when(network.getDefaultIOMode()).thenReturn(IOManager.IOMode.SYNC);
    when(network.createKvStateTaskRegistry(any(JobID.class), any(JobVertexID.class))).thenReturn(mock(TaskKvStateRegistry.class));
    JobInformation jobInformation = new JobInformation(new JobID(), "Job Name", new SerializedValue<>(new ExecutionConfig()), new Configuration(), Collections.<BlobKey>emptyList(), Collections.<URL>emptyList());
    TaskInformation taskInformation = new TaskInformation(new JobVertexID(), "Test Task", 1, 1, invokable.getName(), taskConfig.getConfiguration());
    return new Task(
        jobInformation, taskInformation, new ExecutionAttemptID(), new AllocationID(), 0, 0,
        Collections.<ResultPartitionDeploymentDescriptor>emptyList(),
        Collections.<InputGateDeploymentDescriptor>emptyList(), 0,
        new TaskStateHandles(),
        mock(MemoryManager.class), mock(IOManager.class), network,
        mock(BroadcastVariableManager.class), mock(TaskManagerActions.class),
        mock(InputSplitProvider.class), mock(CheckpointResponder.class),
        libCache, mock(FileCache.class),
        new TestingTaskManagerRuntimeInfo(taskManagerConfig, new String[] { System.getProperty("java.io.tmpdir") }),
        new UnregisteredTaskMetricsGroup(),
        consumableNotifier, partitionProducerStateChecker, executor);
}
Also used : Task(org.apache.flink.runtime.taskmanager.Task) Configuration(org.apache.flink.configuration.Configuration) JobVertexID(org.apache.flink.runtime.jobgraph.JobVertexID) TaskKvStateRegistry(org.apache.flink.runtime.query.TaskKvStateRegistry) ExecutionConfig(org.apache.flink.api.common.ExecutionConfig) Matchers.anyString(org.mockito.Matchers.anyString) TaskManagerActions(org.apache.flink.runtime.taskmanager.TaskManagerActions) Executor(java.util.concurrent.Executor) TestingTaskManagerRuntimeInfo(org.apache.flink.runtime.util.TestingTaskManagerRuntimeInfo) BroadcastVariableManager(org.apache.flink.runtime.broadcast.BroadcastVariableManager) PartitionProducerStateChecker(org.apache.flink.runtime.io.network.netty.PartitionProducerStateChecker) ResultPartitionConsumableNotifier(org.apache.flink.runtime.io.network.partition.ResultPartitionConsumableNotifier) InputSplitProvider(org.apache.flink.runtime.jobgraph.tasks.InputSplitProvider) UnregisteredTaskMetricsGroup(org.apache.flink.runtime.operators.testutils.UnregisteredTaskMetricsGroup) JobInformation(org.apache.flink.runtime.executiongraph.JobInformation) TaskInformation(org.apache.flink.runtime.executiongraph.TaskInformation) ExecutionAttemptID(org.apache.flink.runtime.executiongraph.ExecutionAttemptID) IOManager(org.apache.flink.runtime.io.disk.iomanager.IOManager) CheckpointResponder(org.apache.flink.runtime.taskmanager.CheckpointResponder) AllocationID(org.apache.flink.runtime.clusterframework.types.AllocationID) LibraryCacheManager(org.apache.flink.runtime.execution.librarycache.LibraryCacheManager) ResultPartitionManager(org.apache.flink.runtime.io.network.partition.ResultPartitionManager) MemoryManager(org.apache.flink.runtime.memory.MemoryManager) FileCache(org.apache.flink.runtime.filecache.FileCache) TaskStateHandles(org.apache.flink.runtime.state.TaskStateHandles) NetworkEnvironment(org.apache.flink.runtime.io.network.NetworkEnvironment) JobID(org.apache.flink.api.common.JobID)
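
For context, a minimal sketch of how this factory might be exercised in a test. DoNothingInvokable, the test method, and the ExecutionState assertions are assumptions added for illustration; they are not part of StreamTaskTest. Imports would mirror the "Also used" list above plus org.apache.flink.runtime.execution.ExecutionState, org.apache.flink.runtime.jobgraph.tasks.AbstractInvokable, and org.junit.Assert.assertEquals.

// Hypothetical usage sketch for the factory above; names below are illustrative only.
public static class DoNothingInvokable extends AbstractInvokable {

    @Override
    public void invoke() throws Exception {
        // no work to do; the task should run to completion immediately
    }
}

@Test
public void runsTaskCreatedByFactory() throws Exception {
    Task task = createTask(DoNothingInvokable.class, new StreamConfig(new Configuration()), new Configuration());

    // a freshly created Task has not been started yet
    assertEquals(ExecutionState.CREATED, task.getExecutionState());

    task.startTaskThread();
    task.getExecutingThread().join();

    // with a no-op invokable the task is expected to finish cleanly
    assertEquals(ExecutionState.FINISHED, task.getExecutionState());
}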

Example 2 with TaskStateHandles

use of org.apache.flink.runtime.state.TaskStateHandles in project flink by apache.

the class OneInputStreamTaskTest method testSnapshottingAndRestoring.

/**
 * Tests that the stream operator can snapshot and restore the operator state of chained
 * operators.
 */
@Test
public void testSnapshottingAndRestoring() throws Exception {
    final Deadline deadline = new FiniteDuration(2, TimeUnit.MINUTES).fromNow();
    final OneInputStreamTask<String, String> streamTask = new OneInputStreamTask<String, String>();
    final OneInputStreamTaskTestHarness<String, String> testHarness = new OneInputStreamTaskTestHarness<String, String>(streamTask, BasicTypeInfo.STRING_TYPE_INFO, BasicTypeInfo.STRING_TYPE_INFO);
    testHarness.setupOutputForSingletonOperatorChain();
    IdentityKeySelector<String> keySelector = new IdentityKeySelector<>();
    testHarness.configureForKeyedStream(keySelector, BasicTypeInfo.STRING_TYPE_INFO);
    long checkpointId = 1L;
    long checkpointTimestamp = 1L;
    long recoveryTimestamp = 3L;
    long seed = 2L;
    int numberChainedTasks = 11;
    StreamConfig streamConfig = testHarness.getStreamConfig();
    configureChainedTestingStreamOperator(streamConfig, numberChainedTasks, seed, recoveryTimestamp);
    AcknowledgeStreamMockEnvironment env = new AcknowledgeStreamMockEnvironment(testHarness.jobConfig, testHarness.taskConfig, testHarness.executionConfig, testHarness.memorySize, new MockInputSplitProvider(), testHarness.bufferSize);
    // reset number of restore calls
    TestingStreamOperator.numberRestoreCalls = 0;
    testHarness.invoke(env);
    testHarness.waitForTaskRunning(deadline.timeLeft().toMillis());
    CheckpointMetaData checkpointMetaData = new CheckpointMetaData(checkpointId, checkpointTimestamp);
    // keep retrying until the task is ready and accepts the checkpoint trigger
    while (!streamTask.triggerCheckpoint(checkpointMetaData, CheckpointOptions.forFullCheckpoint())) ;
    // since no state was set, there shouldn't be restore calls
    assertEquals(0, TestingStreamOperator.numberRestoreCalls);
    env.getCheckpointLatch().await();
    assertEquals(checkpointId, env.getCheckpointId());
    testHarness.endInput();
    testHarness.waitForTaskCompletion(deadline.timeLeft().toMillis());
    final OneInputStreamTask<String, String> restoredTask = new OneInputStreamTask<String, String>();
    restoredTask.setInitialState(new TaskStateHandles(env.getCheckpointStateHandles()));
    final OneInputStreamTaskTestHarness<String, String> restoredTaskHarness = new OneInputStreamTaskTestHarness<String, String>(restoredTask, BasicTypeInfo.STRING_TYPE_INFO, BasicTypeInfo.STRING_TYPE_INFO);
    restoredTaskHarness.configureForKeyedStream(keySelector, BasicTypeInfo.STRING_TYPE_INFO);
    StreamConfig restoredTaskStreamConfig = restoredTaskHarness.getStreamConfig();
    configureChainedTestingStreamOperator(restoredTaskStreamConfig, numberChainedTasks, seed, recoveryTimestamp);
    TestingStreamOperator.numberRestoreCalls = 0;
    restoredTaskHarness.invoke();
    restoredTaskHarness.endInput();
    restoredTaskHarness.waitForTaskCompletion(deadline.timeLeft().toMillis());
    // restore of every chained operator should have been called
    assertEquals(numberChainedTasks, TestingStreamOperator.numberRestoreCalls);
    TestingStreamOperator.numberRestoreCalls = 0;
}
Also used : Deadline(scala.concurrent.duration.Deadline) FiniteDuration(scala.concurrent.duration.FiniteDuration) StreamConfig(org.apache.flink.streaming.api.graph.StreamConfig) CheckpointMetaData(org.apache.flink.runtime.checkpoint.CheckpointMetaData) TaskStateHandles(org.apache.flink.runtime.state.TaskStateHandles) MockInputSplitProvider(org.apache.flink.runtime.operators.testutils.MockInputSplitProvider) Test(org.junit.Test)

Example 3 with TaskStateHandles

use of org.apache.flink.runtime.state.TaskStateHandles in project flink by apache.

the class StateAssignmentOperation method assignTaskStatesToOperatorInstances.

private static void assignTaskStatesToOperatorInstances(TaskState taskState, ExecutionJobVertex executionJobVertex) {
    final int oldParallelism = taskState.getParallelism();
    final int newParallelism = executionJobVertex.getParallelism();
    List<KeyGroupRange> keyGroupPartitions = createKeyGroupPartitions(executionJobVertex.getMaxParallelism(), newParallelism);
    final int chainLength = taskState.getChainLength();
    // operator chain idx -> list of the stored op states from all parallel instances for this chain idx
    @SuppressWarnings("unchecked") List<OperatorStateHandle>[] parallelOpStatesBackend = new List[chainLength];
    @SuppressWarnings("unchecked") List<OperatorStateHandle>[] parallelOpStatesStream = new List[chainLength];
    List<KeyGroupsStateHandle> parallelKeyedStatesBackend = new ArrayList<>(oldParallelism);
    List<KeyGroupsStateHandle> parallelKeyedStateStream = new ArrayList<>(oldParallelism);
    for (int p = 0; p < oldParallelism; ++p) {
        SubtaskState subtaskState = taskState.getState(p);
        if (null != subtaskState) {
            collectParallelStatesByChainOperator(parallelOpStatesBackend, subtaskState.getManagedOperatorState());
            collectParallelStatesByChainOperator(parallelOpStatesStream, subtaskState.getRawOperatorState());
            KeyGroupsStateHandle keyedStateBackend = subtaskState.getManagedKeyedState();
            if (null != keyedStateBackend) {
                parallelKeyedStatesBackend.add(keyedStateBackend);
            }
            KeyGroupsStateHandle keyedStateStream = subtaskState.getRawKeyedState();
            if (null != keyedStateStream) {
                parallelKeyedStateStream.add(keyedStateStream);
            }
        }
    }
    // operator chain index -> lists with collected states (one collection for each parallel subtasks)
    @SuppressWarnings("unchecked") List<Collection<OperatorStateHandle>>[] partitionedParallelStatesBackend = new List[chainLength];
    @SuppressWarnings("unchecked") List<Collection<OperatorStateHandle>>[] partitionedParallelStatesStream = new List[chainLength];
    //TODO here we can employ different redistribution strategies for state, e.g. union state.
    // For now we only offer round robin as the default.
    OperatorStateRepartitioner opStateRepartitioner = RoundRobinOperatorStateRepartitioner.INSTANCE;
    for (int chainIdx = 0; chainIdx < chainLength; ++chainIdx) {
        List<OperatorStateHandle> chainOpParallelStatesBackend = parallelOpStatesBackend[chainIdx];
        List<OperatorStateHandle> chainOpParallelStatesStream = parallelOpStatesStream[chainIdx];
        partitionedParallelStatesBackend[chainIdx] = applyRepartitioner(opStateRepartitioner, chainOpParallelStatesBackend, oldParallelism, newParallelism);
        partitionedParallelStatesStream[chainIdx] = applyRepartitioner(opStateRepartitioner, chainOpParallelStatesStream, oldParallelism, newParallelism);
    }
    for (int subTaskIdx = 0; subTaskIdx < newParallelism; ++subTaskIdx) {
        // non-partitioned state
        ChainedStateHandle<StreamStateHandle> nonPartitionableState = null;
        if (oldParallelism == newParallelism) {
            if (taskState.getState(subTaskIdx) != null) {
                nonPartitionableState = taskState.getState(subTaskIdx).getLegacyOperatorState();
            }
        }
        // partitionable state
        @SuppressWarnings("unchecked") Collection<OperatorStateHandle>[] iab = new Collection[chainLength];
        @SuppressWarnings("unchecked") Collection<OperatorStateHandle>[] ias = new Collection[chainLength];
        List<Collection<OperatorStateHandle>> operatorStateFromBackend = Arrays.asList(iab);
        List<Collection<OperatorStateHandle>> operatorStateFromStream = Arrays.asList(ias);
        for (int chainIdx = 0; chainIdx < partitionedParallelStatesBackend.length; ++chainIdx) {
            List<Collection<OperatorStateHandle>> redistributedOpStateBackend = partitionedParallelStatesBackend[chainIdx];
            List<Collection<OperatorStateHandle>> redistributedOpStateStream = partitionedParallelStatesStream[chainIdx];
            if (redistributedOpStateBackend != null) {
                operatorStateFromBackend.set(chainIdx, redistributedOpStateBackend.get(subTaskIdx));
            }
            if (redistributedOpStateStream != null) {
                operatorStateFromStream.set(chainIdx, redistributedOpStateStream.get(subTaskIdx));
            }
        }
        Execution currentExecutionAttempt = executionJobVertex.getTaskVertices()[subTaskIdx].getCurrentExecutionAttempt();
        List<KeyGroupsStateHandle> newKeyedStatesBackend;
        List<KeyGroupsStateHandle> newKeyedStateStream;
        if (oldParallelism == newParallelism) {
            SubtaskState subtaskState = taskState.getState(subTaskIdx);
            if (subtaskState != null) {
                KeyGroupsStateHandle oldKeyedStatesBackend = subtaskState.getManagedKeyedState();
                KeyGroupsStateHandle oldKeyedStatesStream = subtaskState.getRawKeyedState();
                newKeyedStatesBackend = oldKeyedStatesBackend != null ? Collections.singletonList(oldKeyedStatesBackend) : null;
                newKeyedStateStream = oldKeyedStatesStream != null ? Collections.singletonList(oldKeyedStatesStream) : null;
            } else {
                newKeyedStatesBackend = null;
                newKeyedStateStream = null;
            }
        } else {
            KeyGroupRange subtaskKeyGroupIds = keyGroupPartitions.get(subTaskIdx);
            newKeyedStatesBackend = getKeyGroupsStateHandles(parallelKeyedStatesBackend, subtaskKeyGroupIds);
            newKeyedStateStream = getKeyGroupsStateHandles(parallelKeyedStateStream, subtaskKeyGroupIds);
        }
        TaskStateHandles taskStateHandles = new TaskStateHandles(nonPartitionableState, operatorStateFromBackend, operatorStateFromStream, newKeyedStatesBackend, newKeyedStateStream);
        currentExecutionAttempt.setInitialState(taskStateHandles);
    }
}
Also used : KeyGroupRange(org.apache.flink.runtime.state.KeyGroupRange) ArrayList(java.util.ArrayList) KeyGroupsStateHandle(org.apache.flink.runtime.state.KeyGroupsStateHandle) TaskStateHandles(org.apache.flink.runtime.state.TaskStateHandles) StreamStateHandle(org.apache.flink.runtime.state.StreamStateHandle) Execution(org.apache.flink.runtime.executiongraph.Execution) Collection(java.util.Collection) ArrayList(java.util.ArrayList) List(java.util.List) OperatorStateHandle(org.apache.flink.runtime.state.OperatorStateHandle)
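
The rescaling branch above relies on createKeyGroupPartitions to split the key-group space [0, maxParallelism) into one contiguous KeyGroupRange per new subtask, which is then looked up via keyGroupPartitions.get(subTaskIdx). Below is a minimal sketch of such a partitioning, assuming maxParallelism >= parallelism; it illustrates the idea and is not necessarily Flink's exact assignment code (the real logic sits in createKeyGroupPartitions / KeyGroupRangeAssignment).

// Illustrative sketch only: splits [0, maxParallelism) into `parallelism`
// contiguous, non-overlapping KeyGroupRanges of roughly equal size.
private static List<KeyGroupRange> sketchKeyGroupPartitions(int maxParallelism, int parallelism) {
    List<KeyGroupRange> partitions = new ArrayList<>(parallelism);
    for (int i = 0; i < parallelism; i++) {
        int start = i * maxParallelism / parallelism;
        int end = (i + 1) * maxParallelism / parallelism - 1;
        // e.g. maxParallelism = 128, parallelism = 3 -> [0,41], [42,84], [85,127]
        partitions.add(new KeyGroupRange(start, end));
    }
    return partitions;
}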

Example 4 with TaskStateHandles

use of org.apache.flink.runtime.state.TaskStateHandles in project flink by apache.

the class TaskAsyncCallTest method createTask.

private static Task createTask() throws Exception {
    LibraryCacheManager libCache = mock(LibraryCacheManager.class);
    when(libCache.getClassLoader(any(JobID.class))).thenReturn(ClassLoader.getSystemClassLoader());
    ResultPartitionManager partitionManager = mock(ResultPartitionManager.class);
    ResultPartitionConsumableNotifier consumableNotifier = mock(ResultPartitionConsumableNotifier.class);
    PartitionProducerStateChecker partitionProducerStateChecker = mock(PartitionProducerStateChecker.class);
    Executor executor = mock(Executor.class);
    NetworkEnvironment networkEnvironment = mock(NetworkEnvironment.class);
    when(networkEnvironment.getResultPartitionManager()).thenReturn(partitionManager);
    when(networkEnvironment.getDefaultIOMode()).thenReturn(IOManager.IOMode.SYNC);
    when(networkEnvironment.createKvStateTaskRegistry(any(JobID.class), any(JobVertexID.class))).thenReturn(mock(TaskKvStateRegistry.class));
    JobInformation jobInformation = new JobInformation(new JobID(), "Job Name", new SerializedValue<>(new ExecutionConfig()), new Configuration(), Collections.<BlobKey>emptyList(), Collections.<URL>emptyList());
    TaskInformation taskInformation = new TaskInformation(new JobVertexID(), "Test Task", 1, 1, CheckpointsInOrderInvokable.class.getName(), new Configuration());
    return new Task(
        jobInformation, taskInformation, new ExecutionAttemptID(), new AllocationID(), 0, 0,
        Collections.<ResultPartitionDeploymentDescriptor>emptyList(),
        Collections.<InputGateDeploymentDescriptor>emptyList(), 0,
        new TaskStateHandles(),
        mock(MemoryManager.class), mock(IOManager.class), networkEnvironment,
        mock(BroadcastVariableManager.class), mock(TaskManagerActions.class),
        mock(InputSplitProvider.class), mock(CheckpointResponder.class),
        libCache, mock(FileCache.class),
        new TestingTaskManagerRuntimeInfo(),
        mock(TaskMetricGroup.class),
        consumableNotifier, partitionProducerStateChecker, executor);
}
Also used : StatefulTask(org.apache.flink.runtime.jobgraph.tasks.StatefulTask) Configuration(org.apache.flink.configuration.Configuration) JobVertexID(org.apache.flink.runtime.jobgraph.JobVertexID) TaskKvStateRegistry(org.apache.flink.runtime.query.TaskKvStateRegistry) ExecutionConfig(org.apache.flink.api.common.ExecutionConfig) Executor(java.util.concurrent.Executor) TestingTaskManagerRuntimeInfo(org.apache.flink.runtime.util.TestingTaskManagerRuntimeInfo) BroadcastVariableManager(org.apache.flink.runtime.broadcast.BroadcastVariableManager) PartitionProducerStateChecker(org.apache.flink.runtime.io.network.netty.PartitionProducerStateChecker) ResultPartitionConsumableNotifier(org.apache.flink.runtime.io.network.partition.ResultPartitionConsumableNotifier) InputSplitProvider(org.apache.flink.runtime.jobgraph.tasks.InputSplitProvider) JobInformation(org.apache.flink.runtime.executiongraph.JobInformation) TaskInformation(org.apache.flink.runtime.executiongraph.TaskInformation) ExecutionAttemptID(org.apache.flink.runtime.executiongraph.ExecutionAttemptID) IOManager(org.apache.flink.runtime.io.disk.iomanager.IOManager) TaskMetricGroup(org.apache.flink.runtime.metrics.groups.TaskMetricGroup) AllocationID(org.apache.flink.runtime.clusterframework.types.AllocationID) LibraryCacheManager(org.apache.flink.runtime.execution.librarycache.LibraryCacheManager) ResultPartitionManager(org.apache.flink.runtime.io.network.partition.ResultPartitionManager) MemoryManager(org.apache.flink.runtime.memory.MemoryManager) FileCache(org.apache.flink.runtime.filecache.FileCache) TaskStateHandles(org.apache.flink.runtime.state.TaskStateHandles) NetworkEnvironment(org.apache.flink.runtime.io.network.NetworkEnvironment) JobID(org.apache.flink.api.common.JobID)

Example 5 with TaskStateHandles

use of org.apache.flink.runtime.state.TaskStateHandles in project flink by apache.

the class AsyncWaitOperatorTest method testStateSnapshotAndRestore.

@Test
public void testStateSnapshotAndRestore() throws Exception {
    final OneInputStreamTask<Integer, Integer> task = new OneInputStreamTask<>();
    final OneInputStreamTaskTestHarness<Integer, Integer> testHarness = new OneInputStreamTaskTestHarness<>(task, 1, 1, BasicTypeInfo.INT_TYPE_INFO, BasicTypeInfo.INT_TYPE_INFO);
    testHarness.setupOutputForSingletonOperatorChain();
    AsyncWaitOperator<Integer, Integer> operator = new AsyncWaitOperator<>(new LazyAsyncFunction(), TIMEOUT, 3, AsyncDataStream.OutputMode.ORDERED);
    final StreamConfig streamConfig = testHarness.getStreamConfig();
    streamConfig.setStreamOperator(operator);
    final AcknowledgeStreamMockEnvironment env = new AcknowledgeStreamMockEnvironment(testHarness.jobConfig, testHarness.taskConfig, testHarness.getExecutionConfig(), testHarness.memorySize, new MockInputSplitProvider(), testHarness.bufferSize);
    testHarness.invoke(env);
    testHarness.waitForTaskRunning();
    final long initialTime = 0L;
    testHarness.processElement(new StreamRecord<>(1, initialTime + 1));
    testHarness.processElement(new StreamRecord<>(2, initialTime + 2));
    testHarness.processElement(new StreamRecord<>(3, initialTime + 3));
    testHarness.processElement(new StreamRecord<>(4, initialTime + 4));
    testHarness.waitForInputProcessing();
    final long checkpointId = 1L;
    final long checkpointTimestamp = 1L;
    final CheckpointMetaData checkpointMetaData = new CheckpointMetaData(checkpointId, checkpointTimestamp);
    task.triggerCheckpoint(checkpointMetaData, CheckpointOptions.forFullCheckpoint());
    env.getCheckpointLatch().await();
    assertEquals(checkpointId, env.getCheckpointId());
    LazyAsyncFunction.countDown();
    testHarness.endInput();
    testHarness.waitForTaskCompletion();
    // set the operator state from previous attempt into the restored one
    final OneInputStreamTask<Integer, Integer> restoredTask = new OneInputStreamTask<>();
    restoredTask.setInitialState(new TaskStateHandles(env.getCheckpointStateHandles()));
    final OneInputStreamTaskTestHarness<Integer, Integer> restoredTaskHarness = new OneInputStreamTaskTestHarness<>(restoredTask, BasicTypeInfo.INT_TYPE_INFO, BasicTypeInfo.INT_TYPE_INFO);
    restoredTaskHarness.setupOutputForSingletonOperatorChain();
    AsyncWaitOperator<Integer, Integer> restoredOperator = new AsyncWaitOperator<>(new MyAsyncFunction(), TIMEOUT, 6, AsyncDataStream.OutputMode.ORDERED);
    restoredTaskHarness.getStreamConfig().setStreamOperator(restoredOperator);
    restoredTaskHarness.invoke();
    restoredTaskHarness.waitForTaskRunning();
    restoredTaskHarness.processElement(new StreamRecord<>(5, initialTime + 5));
    restoredTaskHarness.processElement(new StreamRecord<>(6, initialTime + 6));
    restoredTaskHarness.processElement(new StreamRecord<>(7, initialTime + 7));
    // trigger the checkpoint while processing stream elements
    restoredTask.triggerCheckpoint(new CheckpointMetaData(checkpointId, checkpointTimestamp), CheckpointOptions.forFullCheckpoint());
    restoredTaskHarness.processElement(new StreamRecord<>(8, initialTime + 8));
    restoredTaskHarness.endInput();
    restoredTaskHarness.waitForTaskCompletion();
    ConcurrentLinkedQueue<Object> expectedOutput = new ConcurrentLinkedQueue<>();
    expectedOutput.add(new StreamRecord<>(2, initialTime + 1));
    expectedOutput.add(new StreamRecord<>(4, initialTime + 2));
    expectedOutput.add(new StreamRecord<>(6, initialTime + 3));
    expectedOutput.add(new StreamRecord<>(8, initialTime + 4));
    expectedOutput.add(new StreamRecord<>(10, initialTime + 5));
    expectedOutput.add(new StreamRecord<>(12, initialTime + 6));
    expectedOutput.add(new StreamRecord<>(14, initialTime + 7));
    expectedOutput.add(new StreamRecord<>(16, initialTime + 8));
    // remove CheckpointBarrier which is not expected
    Iterator<Object> iterator = restoredTaskHarness.getOutput().iterator();
    while (iterator.hasNext()) {
        if (iterator.next() instanceof CheckpointBarrier) {
            iterator.remove();
        }
    }
    TestHarnessUtil.assertOutputEquals("StateAndRestored Test Output was not correct.", expectedOutput, restoredTaskHarness.getOutput());
}
Also used : OneInputStreamTask(org.apache.flink.streaming.runtime.tasks.OneInputStreamTask) StreamConfig(org.apache.flink.streaming.api.graph.StreamConfig) CheckpointMetaData(org.apache.flink.runtime.checkpoint.CheckpointMetaData) TaskStateHandles(org.apache.flink.runtime.state.TaskStateHandles) CheckpointBarrier(org.apache.flink.runtime.io.network.api.CheckpointBarrier) OneInputStreamTaskTestHarness(org.apache.flink.streaming.runtime.tasks.OneInputStreamTaskTestHarness) ConcurrentLinkedQueue(java.util.concurrent.ConcurrentLinkedQueue) MockInputSplitProvider(org.apache.flink.runtime.operators.testutils.MockInputSplitProvider) Test(org.junit.Test)
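
The expected records above are each input doubled (1 -> 2, ..., 8 -> 16), which matches the async functions used in this test presumably emitting input * 2. A sketch of that shape, using the Flink 1.2-era AsyncFunction/AsyncCollector API, is shown below; the real MyAsyncFunction and LazyAsyncFunction live in AsyncWaitOperatorTest and are not reproduced here, so DoublingAsyncFunction is an illustrative stand-in only.

// Illustrative stand-in: an async function that doubles its input, the shape
// implied by the expected output (2, 4, ..., 16) in the test above.
static class DoublingAsyncFunction implements AsyncFunction<Integer, Integer> {

    @Override
    public void asyncInvoke(Integer input, AsyncCollector<Integer> collector) throws Exception {
        // complete immediately with the doubled value
        collector.collect(Collections.singletonList(input * 2));
    }
}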

Aggregations

TaskStateHandles (org.apache.flink.runtime.state.TaskStateHandles): 10 uses
JobID (org.apache.flink.api.common.JobID): 6 uses
JobVertexID (org.apache.flink.runtime.jobgraph.JobVertexID): 6 uses
KeyGroupsStateHandle (org.apache.flink.runtime.state.KeyGroupsStateHandle): 5 uses
OperatorStateHandle (org.apache.flink.runtime.state.OperatorStateHandle): 5 uses
StreamStateHandle (org.apache.flink.runtime.state.StreamStateHandle): 5 uses
Collection (java.util.Collection): 4 uses
ExecutionConfig (org.apache.flink.api.common.ExecutionConfig): 4 uses
Configuration (org.apache.flink.configuration.Configuration): 4 uses
AllocationID (org.apache.flink.runtime.clusterframework.types.AllocationID): 4 uses
ExecutionAttemptID (org.apache.flink.runtime.executiongraph.ExecutionAttemptID): 4 uses
JobInformation (org.apache.flink.runtime.executiongraph.JobInformation): 4 uses
TaskInformation (org.apache.flink.runtime.executiongraph.TaskInformation): 4 uses
KeyGroupRange (org.apache.flink.runtime.state.KeyGroupRange): 4 uses
Test (org.junit.Test): 4 uses
ArrayList (java.util.ArrayList): 3 uses
HashMap (java.util.HashMap): 3 uses
Executor (java.util.concurrent.Executor): 3 uses
BroadcastVariableManager (org.apache.flink.runtime.broadcast.BroadcastVariableManager): 3 uses
FileCache (org.apache.flink.runtime.filecache.FileCache): 3 uses