Use of org.apache.flink.runtime.state.InputChannelStateHandle in project flink by apache.
The class SequentialChannelStateReaderImplTest, method writePermuted.
private Tuple2<List<InputChannelStateHandle>, List<ResultSubpartitionStateHandle>> writePermuted(Map<InputChannelInfo, List<byte[]>> inputChannels, Map<ResultSubpartitionInfo, List<byte[]>> resultSubpartitions) throws IOException {
try (ByteArrayOutputStream out = new ByteArrayOutputStream()) {
DataOutputStream dataStream = new DataOutputStream(out);
serializer.writeHeader(dataStream);
Map<InputChannelInfo, List<Long>> icOffsets = write(dataStream, permute(inputChannels));
Map<ResultSubpartitionInfo, List<Long>> rsOffsets = write(dataStream, permute(resultSubpartitions));
ByteStreamStateHandle streamStateHandle = new ByteStreamStateHandle("", out.toByteArray());
return Tuple2.of(
        icOffsets.entrySet().stream()
                .map(e -> new InputChannelStateHandle(e.getKey(), streamStateHandle, e.getValue()))
                .collect(toList()),
        rsOffsets.entrySet().stream()
                .map(e -> new ResultSubpartitionStateHandle(e.getKey(), streamStateHandle, e.getValue()))
                .collect(toList()));
}
}
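For orientation, writePermuted serializes the permuted channel data into a single ByteStreamStateHandle and then wraps it in per-channel handles that carry buffer offsets into that shared handle. The following is a minimal standalone sketch of that data structure, not part of the test above: the class name, byte content, and offset values are made up for illustration, and the imports assume Flink's usual runtime packages.

import java.util.Arrays;

import org.apache.flink.runtime.checkpoint.channel.InputChannelInfo;
import org.apache.flink.runtime.state.InputChannelStateHandle;
import org.apache.flink.runtime.state.memory.ByteStreamStateHandle;

public class InputChannelStateHandleSketch {
    public static void main(String[] args) {
        // Bytes that the channel state serializer would normally produce (dummy content here).
        byte[] serialized = new byte[] {0, 1, 2, 3, 4, 5, 6, 7};
        ByteStreamStateHandle delegate = new ByteStreamStateHandle("channel-state", serialized);
        // Channel 0 of input gate 0; the offsets mark where this channel's buffers
        // start inside the shared delegate handle (illustrative values).
        InputChannelStateHandle handle =
                new InputChannelStateHandle(new InputChannelInfo(0, 0), delegate, Arrays.asList(0L, 4L));
        System.out.println("created " + handle + " with state size " + handle.getStateSize());
    }
}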
Use of org.apache.flink.runtime.state.InputChannelStateHandle in project flink by apache.
The class TaskStateSnapshotTest, method testSizeIncludesChannelState.
@Test
public void testSizeIncludesChannelState() {
final Random random = new Random();
InputChannelStateHandle inputChannelStateHandle = createNewInputChannelStateHandle(10, random);
ResultSubpartitionStateHandle resultSubpartitionStateHandle = createNewResultSubpartitionStateHandle(10, random);
final TaskStateSnapshot taskStateSnapshot = new TaskStateSnapshot(
        Collections.singletonMap(
                new OperatorID(),
                OperatorSubtaskState.builder()
                        .setInputChannelState(singleton(inputChannelStateHandle))
                        .setResultSubpartitionState(singleton(resultSubpartitionStateHandle))
                        .build()));
Assert.assertEquals(
        inputChannelStateHandle.getStateSize() + resultSubpartitionStateHandle.getStateSize(),
        taskStateSnapshot.getStateSize());
Assert.assertTrue(taskStateSnapshot.hasState());
}
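The handles in this test come from statically imported test helpers that are not shown here. As a rough, hand-rolled equivalent, the sketch below builds the same kind of snapshot from explicitly constructed handles and compares the summed handle sizes with the snapshot's reported size. The class name, dummy bytes, and offsets are invented for illustration; the constructors and packages mirror the ones already used in the snippets on this page.

import java.util.Arrays;
import java.util.Collections;

import org.apache.flink.runtime.checkpoint.OperatorSubtaskState;
import org.apache.flink.runtime.checkpoint.StateObjectCollection;
import org.apache.flink.runtime.checkpoint.TaskStateSnapshot;
import org.apache.flink.runtime.checkpoint.channel.InputChannelInfo;
import org.apache.flink.runtime.checkpoint.channel.ResultSubpartitionInfo;
import org.apache.flink.runtime.jobgraph.OperatorID;
import org.apache.flink.runtime.state.InputChannelStateHandle;
import org.apache.flink.runtime.state.ResultSubpartitionStateHandle;
import org.apache.flink.runtime.state.memory.ByteStreamStateHandle;

public class TaskStateSnapshotSizeSketch {
    public static void main(String[] args) {
        ByteStreamStateHandle bytes = new ByteStreamStateHandle("dummy", new byte[16]);
        InputChannelStateHandle ic = new InputChannelStateHandle(
                new InputChannelInfo(0, 0), bytes, Arrays.asList(0L));
        ResultSubpartitionStateHandle rs = new ResultSubpartitionStateHandle(
                new ResultSubpartitionInfo(0, 0), bytes, Arrays.asList(8L));
        TaskStateSnapshot snapshot = new TaskStateSnapshot(
                Collections.singletonMap(
                        new OperatorID(),
                        OperatorSubtaskState.builder()
                                .setInputChannelState(new StateObjectCollection<>(Collections.singletonList(ic)))
                                .setResultSubpartitionState(new StateObjectCollection<>(Collections.singletonList(rs)))
                                .build()));
        // The snapshot size should equal the sum of the two channel state handle sizes.
        System.out.println((ic.getStateSize() + rs.getStateSize()) + " vs " + snapshot.getStateSize());
    }
}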
Use of org.apache.flink.runtime.state.InputChannelStateHandle in project flink by apache.
The class OperatorSnapshotUtil, method readStateHandle.
public static OperatorSubtaskState readStateHandle(String path) throws IOException, ClassNotFoundException {
FileInputStream in = new FileInputStream(path);
try (DataInputStream dis = new DataInputStream(in)) {
// required for backwards compatibility.
final int v = dis.readInt();
// still required for compatibility to consume the bytes.
MetadataV3Serializer.deserializeStreamStateHandle(dis);
List<OperatorStateHandle> rawOperatorState = null;
int numRawOperatorStates = dis.readInt();
if (numRawOperatorStates >= 0) {
rawOperatorState = new ArrayList<>();
for (int i = 0; i < numRawOperatorStates; i++) {
OperatorStateHandle operatorState = MetadataV3Serializer.deserializeOperatorStateHandleUtil(dis);
rawOperatorState.add(operatorState);
}
}
List<OperatorStateHandle> managedOperatorState = null;
int numManagedOperatorStates = dis.readInt();
if (numManagedOperatorStates >= 0) {
managedOperatorState = new ArrayList<>();
for (int i = 0; i < numManagedOperatorStates; i++) {
OperatorStateHandle operatorState = MetadataV3Serializer.deserializeOperatorStateHandleUtil(dis);
managedOperatorState.add(operatorState);
}
}
List<KeyedStateHandle> rawKeyedState = null;
int numRawKeyedStates = dis.readInt();
if (numRawKeyedStates >= 0) {
rawKeyedState = new ArrayList<>();
for (int i = 0; i < numRawKeyedStates; i++) {
KeyedStateHandle keyedState = MetadataV3Serializer.deserializeKeyedStateHandleUtil(dis);
rawKeyedState.add(keyedState);
}
}
List<KeyedStateHandle> managedKeyedState = null;
int numManagedKeyedStates = dis.readInt();
if (numManagedKeyedStates >= 0) {
managedKeyedState = new ArrayList<>();
for (int i = 0; i < numManagedKeyedStates; i++) {
KeyedStateHandle keyedState = MetadataV3Serializer.deserializeKeyedStateHandleUtil(dis);
managedKeyedState.add(keyedState);
}
}
final StateObjectCollection<InputChannelStateHandle> inputChannelStateHandles =
        v == MetadataV3Serializer.VERSION
                ? MetadataV3Serializer.deserializeInputChannelStateHandle(dis)
                : StateObjectCollection.empty();
final StateObjectCollection<ResultSubpartitionStateHandle> resultSubpartitionStateHandles =
        v == MetadataV3Serializer.VERSION
                ? MetadataV3Serializer.INSTANCE.deserializeResultSubpartitionStateHandle(dis)
                : StateObjectCollection.empty();
return OperatorSubtaskState.builder()
        .setManagedOperatorState(new StateObjectCollection<>(managedOperatorState))
        .setRawOperatorState(new StateObjectCollection<>(rawOperatorState))
        .setManagedKeyedState(new StateObjectCollection<>(managedKeyedState))
        .setRawKeyedState(new StateObjectCollection<>(rawKeyedState))
        .setInputChannelState(inputChannelStateHandles)
        .setResultSubpartitionState(resultSubpartitionStateHandles)
        .build();
}
}
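Note the on-disk convention shared by readStateHandle and its writer counterpart below: every state collection is stored as an int count followed by that many serialized handles, and -1 marks a collection that was null rather than empty. The self-contained sketch below, using only plain java.io classes and made-up names, illustrates just that length-prefix convention.

import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;

public class LengthPrefixedListSketch {
    // Writes -1 for a null list, otherwise the size followed by the elements.
    static void writeList(DataOutputStream dos, List<Integer> list) throws IOException {
        if (list == null) {
            dos.writeInt(-1);
            return;
        }
        dos.writeInt(list.size());
        for (int element : list) {
            dos.writeInt(element);
        }
    }

    // Returns null if -1 was stored, otherwise reads exactly `count` elements.
    static List<Integer> readList(DataInputStream dis) throws IOException {
        int count = dis.readInt();
        if (count < 0) {
            return null;
        }
        List<Integer> list = new ArrayList<>(count);
        for (int i = 0; i < count; i++) {
            list.add(dis.readInt());
        }
        return list;
    }

    public static void main(String[] args) throws IOException {
        ByteArrayOutputStream out = new ByteArrayOutputStream();
        try (DataOutputStream dos = new DataOutputStream(out)) {
            writeList(dos, Arrays.asList(1, 2, 3));
            writeList(dos, null);
        }
        try (DataInputStream dis = new DataInputStream(new ByteArrayInputStream(out.toByteArray()))) {
            System.out.println(readList(dis)); // prints [1, 2, 3]
            System.out.println(readList(dis)); // prints null
        }
    }
}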
Use of org.apache.flink.runtime.state.InputChannelStateHandle in project flink by apache.
The class OperatorSnapshotUtil, method writeStateHandle.
public static void writeStateHandle(OperatorSubtaskState state, String path) throws IOException {
FileOutputStream out = new FileOutputStream(path);
try (DataOutputStream dos = new DataOutputStream(out)) {
// required for backwards compatibility.
dos.writeInt(MetadataV3Serializer.VERSION);
// still required for compatibility
MetadataV3Serializer.serializeStreamStateHandle(null, dos);
Collection<OperatorStateHandle> rawOperatorState = state.getRawOperatorState();
if (rawOperatorState != null) {
dos.writeInt(rawOperatorState.size());
for (OperatorStateHandle operatorStateHandle : rawOperatorState) {
MetadataV3Serializer.serializeOperatorStateHandleUtil(operatorStateHandle, dos);
}
} else {
// this means no states, not even an empty list
dos.writeInt(-1);
}
Collection<OperatorStateHandle> managedOperatorState = state.getManagedOperatorState();
if (managedOperatorState != null) {
dos.writeInt(managedOperatorState.size());
for (OperatorStateHandle operatorStateHandle : managedOperatorState) {
MetadataV3Serializer.serializeOperatorStateHandleUtil(operatorStateHandle, dos);
}
} else {
// this means no states, not even an empty list
dos.writeInt(-1);
}
Collection<KeyedStateHandle> rawKeyedState = state.getRawKeyedState();
if (rawKeyedState != null) {
dos.writeInt(rawKeyedState.size());
for (KeyedStateHandle keyedStateHandle : rawKeyedState) {
MetadataV3Serializer.serializeKeyedStateHandleUtil(keyedStateHandle, dos);
}
} else {
// this means no keyed states, not even an empty list
dos.writeInt(-1);
}
Collection<KeyedStateHandle> managedKeyedState = state.getManagedKeyedState();
if (managedKeyedState != null) {
dos.writeInt(managedKeyedState.size());
for (KeyedStateHandle keyedStateHandle : managedKeyedState) {
MetadataV3Serializer.serializeKeyedStateHandleUtil(keyedStateHandle, dos);
}
} else {
// this means no keyed states, not even an empty list
dos.writeInt(-1);
}
Collection<InputChannelStateHandle> inputChannelStateHandles = state.getInputChannelState();
dos.writeInt(inputChannelStateHandles.size());
for (InputChannelStateHandle inputChannelStateHandle : inputChannelStateHandles) {
MetadataV3Serializer.INSTANCE.serializeInputChannelStateHandle(inputChannelStateHandle, dos);
}
Collection<ResultSubpartitionStateHandle> resultSubpartitionStateHandles = state.getResultSubpartitionState();
dos.writeInt(resultSubpartitionStateHandles.size());
for (ResultSubpartitionStateHandle resultSubpartitionStateHandle : resultSubpartitionStateHandles) {
MetadataV3Serializer.INSTANCE.serializeResultSubpartitionStateHandle(resultSubpartitionStateHandle, dos);
}
dos.flush();
}
}
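Taken together, writeStateHandle and readStateHandle give a simple file round trip for an OperatorSubtaskState. The sketch below shows one possible usage and is not taken from the Flink tests: it assumes OperatorSnapshotUtil is on the test classpath under org.apache.flink.streaming.util, and the handle contents, offsets, and file name are invented for illustration.

import java.io.File;
import java.util.Arrays;
import java.util.Collections;

import org.apache.flink.runtime.checkpoint.OperatorSubtaskState;
import org.apache.flink.runtime.checkpoint.StateObjectCollection;
import org.apache.flink.runtime.checkpoint.channel.InputChannelInfo;
import org.apache.flink.runtime.checkpoint.channel.ResultSubpartitionInfo;
import org.apache.flink.runtime.state.InputChannelStateHandle;
import org.apache.flink.runtime.state.ResultSubpartitionStateHandle;
import org.apache.flink.runtime.state.memory.ByteStreamStateHandle;
import org.apache.flink.streaming.util.OperatorSnapshotUtil;

public class SnapshotRoundTripSketch {
    public static void main(String[] args) throws Exception {
        ByteStreamStateHandle bytes = new ByteStreamStateHandle("dummy", new byte[8]);
        OperatorSubtaskState original = OperatorSubtaskState.builder()
                .setInputChannelState(new StateObjectCollection<>(Collections.singletonList(
                        new InputChannelStateHandle(new InputChannelInfo(0, 0), bytes, Arrays.asList(0L)))))
                .setResultSubpartitionState(new StateObjectCollection<>(Collections.singletonList(
                        new ResultSubpartitionStateHandle(new ResultSubpartitionInfo(0, 0), bytes, Arrays.asList(4L)))))
                .build();

        // Write the subtask state to a temporary file and read it back.
        File file = File.createTempFile("operator-subtask-state", ".bin");
        file.deleteOnExit();
        OperatorSnapshotUtil.writeStateHandle(original, file.getAbsolutePath());
        OperatorSubtaskState restored = OperatorSnapshotUtil.readStateHandle(file.getAbsolutePath());

        System.out.println("restored input channel handles: " + restored.getInputChannelState().size());
    }
}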
Use of org.apache.flink.runtime.state.InputChannelStateHandle in project flink by apache.
The class StateAssignmentOperation, method reDistributeInputChannelStates.
public void reDistributeInputChannelStates(TaskStateAssignment stateAssignment) {
if (!stateAssignment.hasInputState) {
return;
}
checkForUnsupportedToplogyChanges(stateAssignment.oldState, OperatorSubtaskState::getInputChannelState, stateAssignment.inputOperatorID);
final ExecutionJobVertex executionJobVertex = stateAssignment.executionJobVertex;
final List<IntermediateResult> inputs = executionJobVertex.getInputs();
// check for rescaling: no rescaling = simple reassignment
final OperatorState inputState = stateAssignment.oldState.get(stateAssignment.inputOperatorID);
final List<List<InputChannelStateHandle>> inputOperatorState = splitBySubtasks(inputState, OperatorSubtaskState::getInputChannelState);
if (inputState.getParallelism() == executionJobVertex.getParallelism()) {
stateAssignment.inputChannelStates.putAll(toInstanceMap(stateAssignment.inputOperatorID, inputOperatorState));
return;
}
// rescaling: redistribute the channel state of each input gate according to that gate's rescale mapping
for (int gateIndex = 0; gateIndex < inputs.size(); gateIndex++) {
final RescaleMappings mapping = stateAssignment.getInputMapping(gateIndex).getRescaleMappings();
final List<List<InputChannelStateHandle>> gateState = inputs.size() == 1
        ? inputOperatorState
        : getPartitionState(inputOperatorState, InputChannelInfo::getGateIdx, gateIndex);
final MappingBasedRepartitioner<InputChannelStateHandle> repartitioner =
        new MappingBasedRepartitioner<>(mapping);
final Map<OperatorInstanceID, List<InputChannelStateHandle>> repartitioned = applyRepartitioner(
        stateAssignment.inputOperatorID,
        repartitioner,
        gateState,
        inputOperatorState.size(),
        stateAssignment.newParallelism);
addToSubtasks(stateAssignment.inputChannelStates, repartitioned);
}
}
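When the parallelism changes, each gate's channel state is repartitioned according to the gate's rescale mapping, so a new subtask collects the handles of every old subtask mapped to it. The sketch below is a conceptual, plain-Java illustration of that mapping-based redistribution; it deliberately avoids Flink's RescaleMappings and MappingBasedRepartitioner classes, and the example parallelisms and mapping are made up.

import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
import java.util.List;

// Conceptual sketch: each new subtask gathers the channel state of the old subtasks
// that the rescale mapping assigns to it.
public class MappingRepartitionSketch {
    static <T> List<List<T>> repartition(List<List<T>> oldStates, List<List<Integer>> newToOld) {
        List<List<T>> newStates = new ArrayList<>();
        for (List<Integer> oldSubtasks : newToOld) {
            List<T> collected = new ArrayList<>();
            for (int oldSubtask : oldSubtasks) {
                collected.addAll(oldStates.get(oldSubtask));
            }
            newStates.add(collected);
        }
        return newStates;
    }

    public static void main(String[] args) {
        // Old parallelism 2; the strings stand in for the InputChannelStateHandles of one subtask each.
        List<List<String>> oldStates = Arrays.asList(
                Arrays.asList("handle-0a", "handle-0b"),
                Arrays.asList("handle-1a"));
        // New parallelism 3 (hypothetical mapping): new subtask 0 takes old 0,
        // new subtask 1 takes old 1, new subtask 2 starts without channel state.
        List<List<Integer>> newToOld = Arrays.asList(
                Arrays.asList(0),
                Arrays.asList(1),
                Collections.emptyList());
        // Prints [[handle-0a, handle-0b], [handle-1a], []]
        System.out.println(repartition(oldStates, newToOld));
    }
}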