Use of org.apache.flink.runtime.jobgraph.OperatorID in project flink by apache.
From the class StateAssignmentOperationTest, method assigningStateHandlesCanNotBeNull:
@Test
public void assigningStateHandlesCanNotBeNull() {
    OperatorState state = new OperatorState(new OperatorID(), 1, MAX_P);

    List<KeyedStateHandle> managedKeyedStateHandles =
            StateAssignmentOperation.getManagedKeyedStateHandles(state, KeyGroupRange.of(0, 1));
    List<KeyedStateHandle> rawKeyedStateHandles =
            StateAssignmentOperation.getRawKeyedStateHandles(state, KeyGroupRange.of(0, 1));

    assertThat(managedKeyedStateHandles, is(empty()));
    assertThat(rawKeyedStateHandles, is(empty()));
}
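Both lookups come back empty (not null) because no subtask state was ever registered on the OperatorState. As a side note, there are two common ways to obtain an OperatorID; a minimal illustrative sketch, not part of the test (the derived form also appears in CheckpointStateRestoreTest further below):

import org.apache.flink.runtime.jobgraph.JobVertexID;
import org.apache.flink.runtime.jobgraph.OperatorID;

JobVertexID vertexId = new JobVertexID();
// The no-arg constructor yields a fresh random 16-byte ID on every call.
OperatorID randomId = new OperatorID();
// fromJobVertexID reuses the JobVertexID's bytes, so the mapping is deterministic.
OperatorID derivedId = OperatorID.fromJobVertexID(vertexId);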
Use of org.apache.flink.runtime.jobgraph.OperatorID in project flink by apache.
From the class StateAssignmentOperationTest, method testRepartitionSplitDistributeStates:
@Test
public void testRepartitionSplitDistributeStates() {
    OperatorID operatorID = new OperatorID();
    OperatorState operatorState = new OperatorState(operatorID, 2, 4);

    Map<String, OperatorStateHandle.StateMetaInfo> metaInfoMap1 = new HashMap<>(1);
    metaInfoMap1.put("t-1", new OperatorStateHandle.StateMetaInfo(new long[] { 0, 10 }, OperatorStateHandle.Mode.SPLIT_DISTRIBUTE));
    OperatorStateHandle osh1 = new OperatorStreamStateHandle(metaInfoMap1, new ByteStreamStateHandle("test1", new byte[30]));
    operatorState.putState(0, OperatorSubtaskState.builder().setManagedOperatorState(osh1).build());

    Map<String, OperatorStateHandle.StateMetaInfo> metaInfoMap2 = new HashMap<>(1);
    metaInfoMap2.put("t-2", new OperatorStateHandle.StateMetaInfo(new long[] { 0, 15 }, OperatorStateHandle.Mode.SPLIT_DISTRIBUTE));
    OperatorStateHandle osh2 = new OperatorStreamStateHandle(metaInfoMap2, new ByteStreamStateHandle("test2", new byte[40]));
    operatorState.putState(1, OperatorSubtaskState.builder().setManagedOperatorState(osh2).build());

    verifyOneKindPartitionableStateRescale(operatorState, operatorID);
}
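For contrast, a hypothetical handle using OperatorStateHandle.Mode.UNION instead of SPLIT_DISTRIBUTE (a sketch under the same setup, not taken from the test class): union state is replicated to every subtask on restore, while split-distribute state is partitioned across them.

Map<String, OperatorStateHandle.StateMetaInfo> unionMetaInfo = new HashMap<>(1);
// UNION mode: on restore, every subtask receives all entries of "t-union"
// instead of a disjoint slice of the offsets.
unionMetaInfo.put("t-union", new OperatorStateHandle.StateMetaInfo(new long[] { 0, 10 }, OperatorStateHandle.Mode.UNION));
OperatorStateHandle unionHandle = new OperatorStreamStateHandle(unionMetaInfo, new ByteStreamStateHandle("test-union", new byte[20]));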
Use of org.apache.flink.runtime.jobgraph.OperatorID in project flink by apache.
From the class StateAssignmentOperationTest, method toExecutionVertices:
private Map<OperatorID, ExecutionJobVertex> toExecutionVertices(JobVertex... jobVertices)
        throws JobException, JobExecutionException {
    JobGraph jobGraph = JobGraphTestUtils.streamingJobGraph(jobVertices);
    ExecutionGraph eg = TestingDefaultExecutionGraphBuilder.newBuilder().setJobGraph(jobGraph).build();
    return Arrays.stream(jobVertices)
            .collect(Collectors.toMap(
                    jobVertex -> jobVertex.getOperatorIDs().get(0).getGeneratedOperatorID(),
                    jobVertex -> {
                        try {
                            return eg.getJobVertex(jobVertex.getID());
                        } catch (Exception e) {
                            throw new RuntimeException(e);
                        }
                    }));
}
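A hypothetical call site for this helper (the vertex name, parallelism, and the NoOpInvokable test task are assumed here for illustration):

JobVertex vertex = new JobVertex("map");
vertex.setInvokableClass(NoOpInvokable.class);
vertex.setParallelism(4);
// Maps each vertex's first generated OperatorID to its ExecutionJobVertex.
Map<OperatorID, ExecutionJobVertex> vertices = toExecutionVertices(vertex);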
Use of org.apache.flink.runtime.jobgraph.OperatorID in project flink by apache.
From the class CheckpointTestUtils, method createOperatorStates:
/**
 * Creates a random collection of OperatorState objects containing various types of state
 * handles.
 *
 * @param basePath The base path for a savepoint; null for a checkpoint.
 * @param numAllRunningTaskStates Number of task states belonging to fully running vertices.
 * @param numPartlyFinishedTaskStates Number of task states belonging to partly finished vertices.
 * @param numFullyFinishedTaskStates Number of task states belonging to fully finished vertices.
 * @param numSubtasksPerTask Number of subtasks for each task.
 */
public static Collection<OperatorState> createOperatorStates(
        Random random,
        @Nullable String basePath,
        int numAllRunningTaskStates,
        int numPartlyFinishedTaskStates,
        int numFullyFinishedTaskStates,
        int numSubtasksPerTask) {
    List<OperatorState> taskStates =
            new ArrayList<>(numAllRunningTaskStates + numPartlyFinishedTaskStates + numFullyFinishedTaskStates);
    for (int stateIdx = 0; stateIdx < numAllRunningTaskStates; ++stateIdx) {
        OperatorState taskState = new OperatorState(new OperatorID(), numSubtasksPerTask, 128);
        randomlySetCoordinatorState(taskState, random);
        randomlySetSubtaskState(taskState, IntStream.range(0, numSubtasksPerTask).toArray(), random, basePath);
        taskStates.add(taskState);
    }
    for (int stateIdx = 0; stateIdx < numPartlyFinishedTaskStates; ++stateIdx) {
        OperatorState taskState = new OperatorState(new OperatorID(), numSubtasksPerTask, 128);
        randomlySetCoordinatorState(taskState, random);
        // The first half of the subtasks get random state; the second half is marked finished.
        randomlySetSubtaskState(taskState, IntStream.range(0, numSubtasksPerTask / 2).toArray(), random, basePath);
        IntStream.range(numSubtasksPerTask / 2, numSubtasksPerTask)
                .forEach(index -> taskState.putState(index, FinishedOperatorSubtaskState.INSTANCE));
        taskStates.add(taskState);
    }
    for (int stateIdx = 0; stateIdx < numFullyFinishedTaskStates; ++stateIdx) {
        taskStates.add(new FullyFinishedOperatorState(new OperatorID(), numSubtasksPerTask, 128));
    }
    return taskStates;
}
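A hypothetical invocation (the argument values are illustrative): a null basePath yields checkpoint-style inline handles, while a non-null path produces savepoint-style file references.

// 2 all-running, 1 partly finished, and 1 fully finished operator state,
// each with 4 subtasks; handle contents are drawn from the seeded Random.
Collection<OperatorState> states =
        CheckpointTestUtils.createOperatorStates(new Random(42), null, 2, 1, 1, 4);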
Use of org.apache.flink.runtime.jobgraph.OperatorID in project flink by apache.
From the class CheckpointStateRestoreTest, method testNonRestoredState:
/**
 * Tests that the allow-non-restored-state flag is handled correctly.
 *
 * <p>The flag only applies to state that is part of the checkpoint.
 */
@Test
public void testNonRestoredState() throws Exception {
    // --- (1) Create tasks to restore the checkpoint with ---
    JobVertexID jobVertexId1 = new JobVertexID();
    JobVertexID jobVertexId2 = new JobVertexID();
    OperatorID operatorId1 = OperatorID.fromJobVertexID(jobVertexId1);

    // 1st JobVertex
    ExecutionVertex vertex11 = mockExecutionVertex(mockExecution(), jobVertexId1, 0, 3);
    ExecutionVertex vertex12 = mockExecutionVertex(mockExecution(), jobVertexId1, 1, 3);
    ExecutionVertex vertex13 = mockExecutionVertex(mockExecution(), jobVertexId1, 2, 3);
    // 2nd JobVertex
    ExecutionVertex vertex21 = mockExecutionVertex(mockExecution(), jobVertexId2, 0, 2);
    ExecutionVertex vertex22 = mockExecutionVertex(mockExecution(), jobVertexId2, 1, 2);

    ExecutionJobVertex jobVertex1 = mockExecutionJobVertex(jobVertexId1, new ExecutionVertex[] { vertex11, vertex12, vertex13 });
    ExecutionJobVertex jobVertex2 = mockExecutionJobVertex(jobVertexId2, new ExecutionVertex[] { vertex21, vertex22 });

    Set<ExecutionJobVertex> tasks = new HashSet<>();
    tasks.add(jobVertex1);
    tasks.add(jobVertex2);

    CheckpointCoordinator coord = new CheckpointCoordinatorBuilder().build();

    // --- (2) Checkpoint misses state for a jobVertex (should work) ---
    Map<OperatorID, OperatorState> checkpointTaskStates = new HashMap<>();
    {
        OperatorState taskState = new OperatorState(operatorId1, 3, 3);
        taskState.putState(0, OperatorSubtaskState.builder().build());
        taskState.putState(1, OperatorSubtaskState.builder().build());
        taskState.putState(2, OperatorSubtaskState.builder().build());
        checkpointTaskStates.put(operatorId1, taskState);
    }
    CompletedCheckpoint checkpoint = new CompletedCheckpoint(
            new JobID(),
            0,
            1,
            2,
            new HashMap<>(checkpointTaskStates),
            Collections.<MasterState>emptyList(),
            CheckpointProperties.forCheckpoint(CheckpointRetentionPolicy.NEVER_RETAIN_AFTER_TERMINATION),
            new TestCompletedCheckpointStorageLocation());
    coord.getCheckpointStore().addCheckpointAndSubsumeOldestOne(checkpoint, new CheckpointsCleaner(), () -> {});

    assertTrue(coord.restoreLatestCheckpointedStateToAll(tasks, false));
    assertTrue(coord.restoreLatestCheckpointedStateToAll(tasks, true));

    // --- (3) JobVertex missing for task state that is part of the checkpoint ---
    JobVertexID newJobVertexID = new JobVertexID();
    OperatorID newOperatorID = OperatorID.fromJobVertexID(newJobVertexID);

    // There is no task for this operator ID.
    {
        OperatorState taskState = new OperatorState(newOperatorID, 1, 1);
        taskState.putState(0, OperatorSubtaskState.builder().build());
        checkpointTaskStates.put(newOperatorID, taskState);
    }
    checkpoint = new CompletedCheckpoint(
            new JobID(),
            1,
            2,
            3,
            new HashMap<>(checkpointTaskStates),
            Collections.<MasterState>emptyList(),
            CheckpointProperties.forCheckpoint(CheckpointRetentionPolicy.NEVER_RETAIN_AFTER_TERMINATION),
            new TestCompletedCheckpointStorageLocation());
    coord.getCheckpointStore().addCheckpointAndSubsumeOldestOne(checkpoint, new CheckpointsCleaner(), () -> {});

    // (i) Allow non-restored state (should succeed)
    final boolean restored = coord.restoreLatestCheckpointedStateToAll(tasks, true);
    assertTrue(restored);

    // (ii) Don't allow non-restored state (should fail)
    try {
        coord.restoreLatestCheckpointedStateToAll(tasks, false);
        fail("Did not throw the expected Exception.");
    } catch (IllegalStateException ignored) {
    }
}
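The try/fail/catch block in step (ii) could equivalently be expressed with assertThrows, available since JUnit 4.13; a stylistic sketch, not the code in the repository:

// Restoring without allowing non-restored state must fail for the extra operator.
assertThrows(IllegalStateException.class, () -> coord.restoreLatestCheckpointedStateToAll(tasks, false));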