Use of org.apache.flink.core.fs.FSDataInputStream in project flink by apache.
In the class MultiStreamStateHandleTest, the method testLinearRead:
@Test
public void testLinearRead() throws IOException {
    MultiStreamStateHandle multiStreamStateHandle = new MultiStreamStateHandle(streamStateHandles);
    try (FSDataInputStream in = multiStreamStateHandle.openInputStream()) {
        for (int i = 0; i < TEST_DATA_LENGTH; ++i) {
            assertEquals(i, in.getPos());
            assertEquals(testData[i], in.read());
        }
        assertEquals(-1, in.read());
        assertEquals(TEST_DATA_LENGTH, in.getPos());
        assertEquals(-1, in.read());
        assertEquals(TEST_DATA_LENGTH, in.getPos());
    }
}
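The fields streamStateHandles and testData belong to the test fixture and are not shown above. A minimal sketch of how such a fixture could be assembled, assuming the test data is simply split across two ByteStreamStateHandle chunks (an illustration only, not the actual setup in Flink):

byte[] testData = new byte[TEST_DATA_LENGTH];
for (int i = 0; i < testData.length; ++i) {
    testData[i] = (byte) i;
}
// Each handle holds one contiguous slice; MultiStreamStateHandle exposes the slices as one stream,
// which is why the linear read above never notices the boundary between handles.
List<StreamStateHandle> streamStateHandles = Arrays.asList(
        new ByteStreamStateHandle("chunk-0", Arrays.copyOfRange(testData, 0, TEST_DATA_LENGTH / 2)),
        new ByteStreamStateHandle("chunk-1", Arrays.copyOfRange(testData, TEST_DATA_LENGTH / 2, TEST_DATA_LENGTH)));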
Use of org.apache.flink.core.fs.FSDataInputStream in project flink by apache.
In the class CheckpointCoordinatorTestingUtils, the method collectResult:
static void collectResult(int opIdx, OperatorStateHandle operatorStateHandle, List<String> resultCollector) throws Exception {
    try (FSDataInputStream in = operatorStateHandle.openInputStream()) {
        for (Map.Entry<String, OperatorStateHandle.StateMetaInfo> entry : operatorStateHandle.getStateNameToPartitionOffsets().entrySet()) {
            for (long offset : entry.getValue().getOffsets()) {
                in.seek(offset);
                Integer state = InstantiationUtil.deserializeObject(in, Thread.currentThread().getContextClassLoader());
                resultCollector.add(opIdx + " : " + entry.getKey() + " : " + state);
            }
        }
    }
}
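Each offset in StateMetaInfo marks the byte position of one serialized value inside the handle's stream, so seeking and calling deserializeObject recovers the values one by one. A minimal round-trip sketch of that convention, using a hypothetical ByteStreamStateHandle-backed stream (values readable with InstantiationUtil.deserializeObject must have been written with a matching serializeObject call):

ByteArrayOutputStream bos = new ByteArrayOutputStream();
long offsetOfValue = bos.size(); // record the position before writing the element
InstantiationUtil.serializeObject(bos, 42);

try (FSDataInputStream in = new ByteStreamStateHandle("demo", bos.toByteArray()).openInputStream()) {
    in.seek(offsetOfValue);
    Integer value = InstantiationUtil.deserializeObject(in, Thread.currentThread().getContextClassLoader());
    // value is 42 again
}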
Use of org.apache.flink.core.fs.FSDataInputStream in project flink by apache.
In the class CheckpointCoordinatorTestingUtils, the method compareKeyedState:
static void compareKeyedState(Collection<KeyGroupsStateHandle> expectPartitionedKeyGroupState, Collection<? extends KeyedStateHandle> actualPartitionedKeyGroupState) throws Exception {
    KeyGroupsStateHandle expectedHeadOpKeyGroupStateHandle = expectPartitionedKeyGroupState.iterator().next();
    int expectedTotalKeyGroups = expectedHeadOpKeyGroupStateHandle.getKeyGroupRange().getNumberOfKeyGroups();
    int actualTotalKeyGroups = 0;
    for (KeyedStateHandle keyedStateHandle : actualPartitionedKeyGroupState) {
        assertTrue(keyedStateHandle instanceof KeyGroupsStateHandle);
        actualTotalKeyGroups += keyedStateHandle.getKeyGroupRange().getNumberOfKeyGroups();
    }
    assertEquals(expectedTotalKeyGroups, actualTotalKeyGroups);
    try (FSDataInputStream inputStream = expectedHeadOpKeyGroupStateHandle.openInputStream()) {
        for (int groupId : expectedHeadOpKeyGroupStateHandle.getKeyGroupRange()) {
            long offset = expectedHeadOpKeyGroupStateHandle.getOffsetForKeyGroup(groupId);
            inputStream.seek(offset);
            int expectedKeyGroupState = InstantiationUtil.deserializeObject(inputStream, Thread.currentThread().getContextClassLoader());
            for (KeyedStateHandle oneActualKeyedStateHandle : actualPartitionedKeyGroupState) {
                assertTrue(oneActualKeyedStateHandle instanceof KeyGroupsStateHandle);
                KeyGroupsStateHandle oneActualKeyGroupStateHandle = (KeyGroupsStateHandle) oneActualKeyedStateHandle;
                if (oneActualKeyGroupStateHandle.getKeyGroupRange().contains(groupId)) {
                    long actualOffset = oneActualKeyGroupStateHandle.getOffsetForKeyGroup(groupId);
                    try (FSDataInputStream actualInputStream = oneActualKeyGroupStateHandle.openInputStream()) {
                        actualInputStream.seek(actualOffset);
                        int actualGroupState = InstantiationUtil.deserializeObject(actualInputStream, Thread.currentThread().getContextClassLoader());
                        assertEquals(expectedKeyGroupState, actualGroupState);
                    }
                }
            }
        }
    }
}
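getOffsetForKeyGroup works because a KeyGroupsStateHandle pairs one underlying stream with per-key-group byte offsets (a KeyGroupRangeOffsets). A hedged sketch of how such a handle could be built by hand, writing one value per key group and recording where it starts (illustrative only, not how the test utilities actually produce their handles):

KeyGroupRange range = KeyGroupRange.of(0, 1);
KeyGroupRangeOffsets offsets = new KeyGroupRangeOffsets(range);
ByteArrayOutputStream bos = new ByteArrayOutputStream();
for (int keyGroup : range) {
    offsets.setKeyGroupOffset(keyGroup, bos.size()); // remember where this group's value starts
    InstantiationUtil.serializeObject(bos, keyGroup * 100);
}
KeyGroupsStateHandle handle =
        new KeyGroupsStateHandle(offsets, new ByteStreamStateHandle("keyed", bos.toByteArray()));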
Use of org.apache.flink.core.fs.FSDataInputStream in project flink by apache.
In the class OperatorStateRestoreOperation, the method restore:
@Override
public Void restore() throws Exception {
    if (stateHandles.isEmpty()) {
        return null;
    }
    for (OperatorStateHandle stateHandle : stateHandles) {
        if (stateHandle == null) {
            continue;
        }
        FSDataInputStream in = stateHandle.openInputStream();
        closeStreamOnCancelRegistry.registerCloseable(in);
        ClassLoader restoreClassLoader = Thread.currentThread().getContextClassLoader();
        try {
            Thread.currentThread().setContextClassLoader(userClassloader);
            OperatorBackendSerializationProxy backendSerializationProxy = new OperatorBackendSerializationProxy(userClassloader);
            backendSerializationProxy.read(new DataInputViewStreamWrapper(in));
            List<StateMetaInfoSnapshot> restoredOperatorMetaInfoSnapshots = backendSerializationProxy.getOperatorStateMetaInfoSnapshots();
            // Recreate all PartitionableListStates from the meta info
            for (StateMetaInfoSnapshot restoredSnapshot : restoredOperatorMetaInfoSnapshots) {
                final RegisteredOperatorStateBackendMetaInfo<?> restoredMetaInfo = new RegisteredOperatorStateBackendMetaInfo<>(restoredSnapshot);
                if (restoredMetaInfo.getPartitionStateSerializer() instanceof UnloadableDummyTypeSerializer) {
                    throw new IOException("Unable to restore operator state [" + restoredSnapshot.getName() + "]." + " The previous typeSerializer of the operator state must be present; the typeSerializer could" + " have been removed from the classpath, or its implementation have changed and could" + " not be loaded. This is a temporary restriction that will be fixed in future versions.");
                }
                PartitionableListState<?> listState = registeredOperatorStates.get(restoredSnapshot.getName());
                if (null == listState) {
                    listState = new PartitionableListState<>(restoredMetaInfo);
                    registeredOperatorStates.put(listState.getStateMetaInfo().getName(), listState);
                } else {
                    // TODO with eager state registration in place, check here for
                    // typeSerializer migration strategies
                }
            }
            // ... and then get back the broadcast state.
            List<StateMetaInfoSnapshot> restoredBroadcastMetaInfoSnapshots = backendSerializationProxy.getBroadcastStateMetaInfoSnapshots();
            for (StateMetaInfoSnapshot restoredSnapshot : restoredBroadcastMetaInfoSnapshots) {
                final RegisteredBroadcastStateBackendMetaInfo<?, ?> restoredMetaInfo = new RegisteredBroadcastStateBackendMetaInfo<>(restoredSnapshot);
                if (restoredMetaInfo.getKeySerializer() instanceof UnloadableDummyTypeSerializer || restoredMetaInfo.getValueSerializer() instanceof UnloadableDummyTypeSerializer) {
                    throw new IOException("Unable to restore broadcast state [" + restoredSnapshot.getName() + "]." + " The previous key and value serializers of the state must be present; the serializers could" + " have been removed from the classpath, or their implementations have changed and could" + " not be loaded. This is a temporary restriction that will be fixed in future versions.");
                }
                BackendWritableBroadcastState<?, ?> broadcastState = registeredBroadcastStates.get(restoredSnapshot.getName());
                if (broadcastState == null) {
                    broadcastState = new HeapBroadcastState<>(restoredMetaInfo);
                    registeredBroadcastStates.put(broadcastState.getStateMetaInfo().getName(), broadcastState);
                } else {
                    // TODO with eager state registration in place, check here for
                    // typeSerializer migration strategies
                }
            }
            // Restore all the states
            for (Map.Entry<String, OperatorStateHandle.StateMetaInfo> nameToOffsets : stateHandle.getStateNameToPartitionOffsets().entrySet()) {
                final String stateName = nameToOffsets.getKey();
                PartitionableListState<?> listStateForName = registeredOperatorStates.get(stateName);
                if (listStateForName == null) {
                    BackendWritableBroadcastState<?, ?> broadcastStateForName = registeredBroadcastStates.get(stateName);
                    Preconditions.checkState(broadcastStateForName != null, "Found state without " + "corresponding meta info: " + stateName);
                    deserializeBroadcastStateValues(broadcastStateForName, in, nameToOffsets.getValue());
                } else {
                    deserializeOperatorStateValues(listStateForName, in, nameToOffsets.getValue());
                }
            }
        } finally {
            Thread.currentThread().setContextClassLoader(restoreClassLoader);
            if (closeStreamOnCancelRegistry.unregisterCloseable(in)) {
                IOUtils.closeQuietly(in);
            }
        }
    }
    return null;
}
Use of org.apache.flink.core.fs.FSDataInputStream in project flink by apache.
In the class CompactOperator, the method doSingleFileMove:
private boolean doSingleFileMove(Path src, Path dst) throws IOException {
    // We cannot simply rename, because we need to keep the original file for failover.
    RecoverableWriter writer;
    try {
        writer = fileSystem.createRecoverableWriter();
    } catch (UnsupportedOperationException ignore) {
        // This file system does not support recoverable writing; signal the caller to fall back.
        return false;
    }
    RecoverableFsDataOutputStream out = writer.open(dst);
    try (FSDataInputStream in = fileSystem.open(src)) {
        IOUtils.copyBytes(in, out, false);
    } catch (Throwable t) {
        out.close();
        throw t;
    }
    out.closeForCommit().commit();
    return true;
}
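The recoverable writer keeps the copied data invisible at dst until closeForCommit().commit() succeeds, so a failure mid-copy leaves src intact and dst absent. Returning false tells the caller that this FileSystem has no RecoverableWriter and another strategy is needed. A hypothetical caller-side fallback, shown only to illustrate that contract (not the actual fallback in Flink):

if (!doSingleFileMove(src, dst)) {
    // plain, non-atomic copy: dst may become visible before the copy is complete
    try (FSDataInputStream in = fileSystem.open(src);
            FSDataOutputStream out = fileSystem.create(dst, FileSystem.WriteMode.NO_OVERWRITE)) {
        IOUtils.copyBytes(in, out, false);
    }
}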