Use of org.apache.flink.runtime.jobgraph.OperatorID in project flink by apache.
The class StateAssignmentOperationTest, method testChannelStateAssignmentDownscalingTwoDifferentGates.
@Test
public void testChannelStateAssignmentDownscalingTwoDifferentGates() throws JobException, JobExecutionException {
    JobVertex upstream1 = createJobVertex(new OperatorID(), 2);
    JobVertex upstream2 = createJobVertex(new OperatorID(), 2);
    JobVertex downstream = createJobVertex(new OperatorID(), 2);
    List<OperatorID> operatorIds =
            Stream.of(upstream1, upstream2, downstream)
                    .map(v -> v.getOperatorIDs().get(0).getGeneratedOperatorID())
                    .collect(Collectors.toList());
    Map<OperatorID, OperatorState> states = buildOperatorStates(operatorIds, 3);

    connectVertices(upstream1, downstream, ARBITRARY, RANGE);
    connectVertices(upstream2, downstream, ROUND_ROBIN, ROUND_ROBIN);

    Map<OperatorID, ExecutionJobVertex> vertices = toExecutionVertices(upstream1, upstream2, downstream);
    new StateAssignmentOperation(0, new HashSet<>(vertices.values()), states, false).assignStates();

    assertEquals(
            new InflightDataRescalingDescriptor(
                    array(
                            gate(to(0, 1), mappings(to(0, 2), to(1)), set(1), RESCALING),
                            gate(to(0, 2), mappings(to(0, 2), to(1)), emptySet(), RESCALING))),
            getAssignedState(vertices.get(operatorIds.get(2)), operatorIds.get(2), 0)
                    .getInputRescalingDescriptor());
}
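The snapshot here was taken with parallelism 3 while the vertices restore with parallelism 2, and the expected input descriptor of the downstream operator carries one gate(...) entry per upstream connection, each recording which old subtasks the restored subtask has to read recovered channel state from. As a rough, self-contained illustration of the overlap idea behind a range-partitioned rescaling (this is only a sketch, not Flink's mapping code; the helper name, its parameters and the max-parallelism value are assumptions):

import java.util.ArrayList;
import java.util.List;

/** Illustrative only: which old subtasks overlap each new subtask under range partitioning. */
public class RangeRescaleSketch {

    /** For every new subtask, the old subtasks whose key ranges intersect its own range. */
    static List<List<Integer>> overlappingOldSubtasks(int oldParallelism, int newParallelism, int maxParallelism) {
        List<List<Integer>> result = new ArrayList<>();
        for (int newIdx = 0; newIdx < newParallelism; newIdx++) {
            int newStart = newIdx * maxParallelism / newParallelism;
            int newEnd = (newIdx + 1) * maxParallelism / newParallelism; // exclusive
            List<Integer> overlaps = new ArrayList<>();
            for (int oldIdx = 0; oldIdx < oldParallelism; oldIdx++) {
                int oldStart = oldIdx * maxParallelism / oldParallelism;
                int oldEnd = (oldIdx + 1) * maxParallelism / oldParallelism; // exclusive
                if (oldStart < newEnd && newStart < oldEnd) {
                    overlaps.add(oldIdx);
                }
            }
            result.add(overlaps);
        }
        return result;
    }

    public static void main(String[] args) {
        // Downscaling 3 -> 2 as in the test above: prints [[0, 1], [1, 2]],
        // i.e. each of the two new subtasks overlaps two of the old subtasks.
        System.out.println(overlappingOldSubtasks(3, 2, 128));
    }
}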
Use of org.apache.flink.runtime.jobgraph.OperatorID in project flink by apache.
The class StateAssignmentOperationTest, method testReDistributeCombinedPartitionableStates.
/**
* Verify repartition logic on partitionable states with all modes.
*/
@Test
public void testReDistributeCombinedPartitionableStates() {
    OperatorID operatorID = new OperatorID();
    OperatorState operatorState = new OperatorState(operatorID, 2, 4);

    Map<String, OperatorStateHandle.StateMetaInfo> metaInfoMap1 = new HashMap<>(6);
    metaInfoMap1.put("t-1", new OperatorStateHandle.StateMetaInfo(new long[] { 0 }, OperatorStateHandle.Mode.UNION));
    metaInfoMap1.put("t-2", new OperatorStateHandle.StateMetaInfo(new long[] { 22, 44 }, OperatorStateHandle.Mode.UNION));
    metaInfoMap1.put("t-3", new OperatorStateHandle.StateMetaInfo(new long[] { 52, 63 }, OperatorStateHandle.Mode.SPLIT_DISTRIBUTE));
    metaInfoMap1.put("t-4", new OperatorStateHandle.StateMetaInfo(new long[] { 67, 74, 75 }, OperatorStateHandle.Mode.BROADCAST));
    metaInfoMap1.put("t-5", new OperatorStateHandle.StateMetaInfo(new long[] { 77, 88, 92 }, OperatorStateHandle.Mode.BROADCAST));
    metaInfoMap1.put("t-6", new OperatorStateHandle.StateMetaInfo(new long[] { 101, 123, 127 }, OperatorStateHandle.Mode.BROADCAST));
    OperatorStateHandle osh1 = new OperatorStreamStateHandle(metaInfoMap1, new ByteStreamStateHandle("test1", new byte[130]));
    operatorState.putState(0, OperatorSubtaskState.builder().setManagedOperatorState(osh1).build());

    Map<String, OperatorStateHandle.StateMetaInfo> metaInfoMap2 = new HashMap<>(3);
    metaInfoMap2.put("t-1", new OperatorStateHandle.StateMetaInfo(new long[] { 0 }, OperatorStateHandle.Mode.UNION));
    metaInfoMap2.put("t-4", new OperatorStateHandle.StateMetaInfo(new long[] { 20, 27, 28 }, OperatorStateHandle.Mode.BROADCAST));
    metaInfoMap2.put("t-5", new OperatorStateHandle.StateMetaInfo(new long[] { 30, 44, 48 }, OperatorStateHandle.Mode.BROADCAST));
    metaInfoMap2.put("t-6", new OperatorStateHandle.StateMetaInfo(new long[] { 57, 79, 83 }, OperatorStateHandle.Mode.BROADCAST));
    OperatorStateHandle osh2 = new OperatorStreamStateHandle(metaInfoMap2, new ByteStreamStateHandle("test2", new byte[86]));
    operatorState.putState(1, OperatorSubtaskState.builder().setManagedOperatorState(osh2).build());

    // rescale up case, parallelism 2 --> 3
    verifyCombinedPartitionableStateRescale(operatorState, operatorID, 2, 3);
    // rescale down case, parallelism 2 --> 1
    verifyCombinedPartitionableStateRescale(operatorState, operatorID, 2, 1);
    // no rescaling, parallelism stays at 2
    verifyCombinedPartitionableStateRescale(operatorState, operatorID, 2, 2);
}
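The first subtask's handle mixes all three redistribution modes: t-1 and t-2 are UNION, t-3 is SPLIT_DISTRIBUTE, and t-4 to t-6 are BROADCAST. Roughly, SPLIT_DISTRIBUTE spreads the collected entries across the new subtasks, UNION gives every new subtask all entries from all old subtasks, and BROADCAST gives every new subtask one complete copy. The following standalone sketch illustrates only that semantics; the string entries, the round-robin split and the redistribute helper are assumptions made for illustration, not the logic inside StateAssignmentOperation:

import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

/** Illustrative semantics of the three operator-state redistribution modes (not Flink's implementation). */
public class RedistributionSketch {

    enum Mode { SPLIT_DISTRIBUTE, UNION, BROADCAST }

    /** Maps old-subtask entry lists onto {@code newParallelism} subtasks according to the mode. */
    static Map<Integer, List<String>> redistribute(
            Map<Integer, List<String>> oldEntries, int newParallelism, Mode mode) {
        Map<Integer, List<String>> result = new HashMap<>();
        for (int i = 0; i < newParallelism; i++) {
            result.put(i, new ArrayList<>());
        }
        switch (mode) {
            case SPLIT_DISTRIBUTE: {
                // Concatenate all old partitions, then spread the entries round-robin.
                List<String> all = new ArrayList<>();
                oldEntries.values().forEach(all::addAll);
                for (int i = 0; i < all.size(); i++) {
                    result.get(i % newParallelism).add(all.get(i));
                }
                break;
            }
            case UNION: {
                // Every new subtask receives the union of all old partitions.
                for (List<String> target : result.values()) {
                    oldEntries.values().forEach(target::addAll);
                }
                break;
            }
            case BROADCAST: {
                // All old copies are identical, so every new subtask gets exactly one of them.
                List<String> oneCopy = oldEntries.values().iterator().next();
                for (List<String> target : result.values()) {
                    target.addAll(oneCopy);
                }
                break;
            }
        }
        return result;
    }

    public static void main(String[] args) {
        Map<Integer, List<String>> old = Map.of(
                0, List.of("t-3#0", "t-3#1"),
                1, List.of("t-3#2"));
        System.out.println(redistribute(old, 3, Mode.SPLIT_DISTRIBUTE)); // rescale up 2 -> 3
        System.out.println(redistribute(old, 1, Mode.SPLIT_DISTRIBUTE)); // rescale down 2 -> 1
        System.out.println(redistribute(old, 2, Mode.BROADCAST));        // every subtask keeps one full copy
    }
}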
Use of org.apache.flink.runtime.jobgraph.OperatorID in project flink by apache.
The class StateAssignmentOperationTest, method testChannelStateAssignmentUpscaling.
@Test
public void testChannelStateAssignmentUpscaling() throws JobException, JobExecutionException {
    List<OperatorID> operatorIds = buildOperatorIds(2);
    Map<OperatorID, OperatorState> states = buildOperatorStates(operatorIds, 2);
    Map<OperatorID, ExecutionJobVertex> vertices = buildVertices(operatorIds, 3, RANGE, ROUND_ROBIN);

    new StateAssignmentOperation(0, new HashSet<>(vertices.values()), states, false).assignStates();

    for (OperatorID operatorId : operatorIds) {
        // input is range partitioned, so there is an overlap
        assertState(vertices, operatorId, states, 0, OperatorSubtaskState::getInputChannelState, 0);
        assertState(vertices, operatorId, states, 1, OperatorSubtaskState::getInputChannelState, 0, 1);
        assertState(vertices, operatorId, states, 2, OperatorSubtaskState::getInputChannelState, 1);
        // output is round robin redistributed
        assertState(vertices, operatorId, states, 0, OperatorSubtaskState::getResultSubpartitionState, 0);
        assertState(vertices, operatorId, states, 1, OperatorSubtaskState::getResultSubpartitionState, 1);
        assertState(vertices, operatorId, states, 2, OperatorSubtaskState::getResultSubpartitionState);
    }

    assertEquals(
            rescalingDescriptor(to(0), array(mappings(to(0), to(0, 1), to(1))), set()),
            getAssignedState(vertices.get(operatorIds.get(0)), operatorIds.get(0), 0).getOutputRescalingDescriptor());
    assertEquals(
            rescalingDescriptor(to(1), array(mappings(to(0), to(0, 1), to(1))), set()),
            getAssignedState(vertices.get(operatorIds.get(0)), operatorIds.get(0), 1).getOutputRescalingDescriptor());
    // unmapped subtask index, so nothing to do
    assertEquals(
            InflightDataRescalingDescriptor.NO_RESCALE,
            getAssignedState(vertices.get(operatorIds.get(0)), operatorIds.get(0), 2).getOutputRescalingDescriptor());
    assertEquals(
            rescalingDescriptor(to(0), array(mappings(to(0), to(1), to())), set(0, 1)),
            getAssignedState(vertices.get(operatorIds.get(1)), operatorIds.get(1), 0).getInputRescalingDescriptor());
    assertEquals(
            rescalingDescriptor(to(0, 1), array(mappings(to(0), to(1), to())), set(0, 1)),
            getAssignedState(vertices.get(operatorIds.get(1)), operatorIds.get(1), 1).getInputRescalingDescriptor());
    assertEquals(
            rescalingDescriptor(to(1), array(mappings(to(0), to(1), to())), set(0, 1)),
            getAssignedState(vertices.get(operatorIds.get(1)), operatorIds.get(1), 2).getInputRescalingDescriptor());
}
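The assertState calls spell out the 2 --> 3 upscaling: on the RANGE-partitioned input the middle subtask overlaps both old subtasks (hence 0, 1), while on the ROUND_ROBIN output each new subtask simply keeps the old subtask with the same index and the brand-new third subtask gets nothing, which is also why its output descriptor is NO_RESCALE. A minimal sketch of that round-robin upscaling rule (purely illustrative; the helper name and signature are assumptions, not Flink API):

import java.util.Collections;
import java.util.List;

/** Illustrative only: round-robin channel mapping when scaling up. */
public class RoundRobinUpscaleSketch {

    /** Old subtasks whose buffered output a given new subtask has to recover. */
    static List<Integer> oldSubtasksFor(int newSubtaskIndex, int oldParallelism) {
        return newSubtaskIndex < oldParallelism
                ? List.of(newSubtaskIndex)            // one-to-one for indices that already existed
                : Collections.<Integer>emptyList();   // brand-new subtasks start without in-flight data
    }

    public static void main(String[] args) {
        // 2 -> 3 as in the test: prints [0], [1], []
        for (int i = 0; i < 3; i++) {
            System.out.println(oldSubtasksFor(i, 2));
        }
    }
}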
Use of org.apache.flink.runtime.jobgraph.OperatorID in project flink by apache.
The class StateAssignmentOperationTest, method testChannelStateAssignmentStability.
/**
* Check that channel and operator states are assigned to the same tasks on recovery.
*/
@Test
public void testChannelStateAssignmentStability() throws JobException, JobExecutionException {
    // note: each operator is placed into a separate vertex
    int numOperators = 10;
    int numSubTasks = 100;
    List<OperatorID> operatorIds = buildOperatorIds(numOperators);
    Map<OperatorID, ExecutionJobVertex> vertices = buildVertices(operatorIds, numSubTasks, RANGE, ROUND_ROBIN);
    Map<OperatorID, OperatorState> states = buildOperatorStates(operatorIds, numSubTasks);

    new StateAssignmentOperation(0, new HashSet<>(vertices.values()), states, false).assignStates();

    for (OperatorID operatorId : operatorIds) {
        for (int subtaskIdx = 0; subtaskIdx < numSubTasks; subtaskIdx++) {
            Assert.assertEquals(
                    states.get(operatorId).getState(subtaskIdx),
                    getAssignedState(vertices.get(operatorId), operatorId, subtaskIdx));
        }
    }
}
Use of org.apache.flink.runtime.jobgraph.OperatorID in project flink by apache.
The class StateAssignmentOperationTest, method testRepartitionBroadcastState.
@Test
public void testRepartitionBroadcastState() {
    OperatorID operatorID = new OperatorID();
    OperatorState operatorState = new OperatorState(operatorID, 2, 4);

    Map<String, OperatorStateHandle.StateMetaInfo> metaInfoMap1 = new HashMap<>(2);
    metaInfoMap1.put("t-5", new OperatorStateHandle.StateMetaInfo(new long[] { 0, 10, 20 }, OperatorStateHandle.Mode.BROADCAST));
    metaInfoMap1.put("t-6", new OperatorStateHandle.StateMetaInfo(new long[] { 30, 40, 50 }, OperatorStateHandle.Mode.BROADCAST));
    OperatorStateHandle osh1 = new OperatorStreamStateHandle(metaInfoMap1, new ByteStreamStateHandle("test1", new byte[60]));
    operatorState.putState(0, OperatorSubtaskState.builder().setManagedOperatorState(osh1).build());

    Map<String, OperatorStateHandle.StateMetaInfo> metaInfoMap2 = new HashMap<>(2);
    metaInfoMap2.put("t-5", new OperatorStateHandle.StateMetaInfo(new long[] { 0, 10, 20 }, OperatorStateHandle.Mode.BROADCAST));
    metaInfoMap2.put("t-6", new OperatorStateHandle.StateMetaInfo(new long[] { 30, 40, 50 }, OperatorStateHandle.Mode.BROADCAST));
    OperatorStateHandle osh2 = new OperatorStreamStateHandle(metaInfoMap2, new ByteStreamStateHandle("test2", new byte[60]));
    operatorState.putState(1, OperatorSubtaskState.builder().setManagedOperatorState(osh2).build());

    verifyOneKindPartitionableStateRescale(operatorState, operatorID);
}
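Both old subtasks carry identical copies of t-5 and t-6, so whatever parallelism the helper rescales to, each new subtask should end up with exactly one complete copy of every broadcast state. A small standalone check of that invariant (the assignment map and holdsBroadcastInvariant are illustrative assumptions, not what verifyOneKindPartitionableStateRescale actually inspects):

import java.util.Collections;
import java.util.List;
import java.util.Map;
import java.util.Set;

/** Illustrative only: the invariant a broadcast-state repartitioning has to satisfy. */
public class BroadcastInvariantSketch {

    /** Every new subtask must hold each broadcast state name exactly once. */
    static boolean holdsBroadcastInvariant(Map<Integer, List<String>> assignment, Set<String> broadcastNames) {
        for (List<String> names : assignment.values()) {
            for (String name : broadcastNames) {
                if (Collections.frequency(names, name) != 1) {
                    return false;
                }
            }
        }
        return true;
    }

    public static void main(String[] args) {
        // e.g. the operator rescaled from 2 to 3 subtasks:
        Map<Integer, List<String>> assignment = Map.of(
                0, List.of("t-5", "t-6"),
                1, List.of("t-5", "t-6"),
                2, List.of("t-5", "t-6"));
        System.out.println(holdsBroadcastInvariant(assignment, Set.of("t-5", "t-6"))); // true
    }
}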