Example usage of org.apache.flink.runtime.state.StateObject in the Apache Flink project: method testRunAndExtract of class OperatorSnapshotFinalizerTest.
/**
 * Verifies that every runnable snapshot future is executed by the finalizer and that each
 * result ends up in the expected slot of the resulting {@link OperatorSubtaskState}.
 */
@Test
public void testRunAndExtract() throws Exception {
    final Random rnd = new Random(0x42);

    // Template handles; every SnapshotResult below holds independent deep copies of these.
    final KeyedStateHandle keyedTemplate =
            StateHandleDummyUtil.createNewKeyedStateHandle(new KeyGroupRange(0, 0));
    final OperatorStateHandle operatorTemplate =
            StateHandleDummyUtil.createNewOperatorStateHandle(2, rnd);
    final InputChannelStateHandle inputChannelTemplate =
            StateHandleDummyUtil.createNewInputChannelStateHandle(2, rnd);
    final ResultSubpartitionStateHandle resultSubpartitionTemplate =
            StateHandleDummyUtil.createNewResultSubpartitionStateHandle(2, rnd);

    // Each result carries both a job-manager-owned and a task-local copy.
    final SnapshotResult<KeyedStateHandle> managedKeyed =
            withLocalState(deepDummyCopy(keyedTemplate), deepDummyCopy(keyedTemplate));
    final SnapshotResult<KeyedStateHandle> rawKeyed =
            withLocalState(deepDummyCopy(keyedTemplate), deepDummyCopy(keyedTemplate));
    final SnapshotResult<OperatorStateHandle> managedOperator =
            withLocalState(deepDummyCopy(operatorTemplate), deepDummyCopy(operatorTemplate));
    final SnapshotResult<OperatorStateHandle> rawOperator =
            withLocalState(deepDummyCopy(operatorTemplate), deepDummyCopy(operatorTemplate));
    final SnapshotResult<StateObjectCollection<InputChannelStateHandle>> inputChannel =
            withLocalState(
                    singleton(deepDummyCopy(inputChannelTemplate)),
                    singleton(deepDummyCopy(inputChannelTemplate)));
    final SnapshotResult<StateObjectCollection<ResultSubpartitionStateHandle>> resultSubpartition =
            withLocalState(
                    singleton(deepDummyCopy(resultSubpartitionTemplate)),
                    singleton(deepDummyCopy(resultSubpartitionTemplate)));

    final OperatorSnapshotFutures futures =
            new OperatorSnapshotFutures(
                    new PseudoNotDoneFuture<>(managedKeyed),
                    new PseudoNotDoneFuture<>(rawKeyed),
                    new PseudoNotDoneFuture<>(managedOperator),
                    new PseudoNotDoneFuture<>(rawOperator),
                    new PseudoNotDoneFuture<>(inputChannel),
                    new PseudoNotDoneFuture<>(resultSubpartition));

    // Nothing has run yet.
    for (Future<?> future : futures.getAllFutures()) {
        assertFalse(future.isDone());
    }

    // Constructing the finalizer must run all futures.
    final OperatorSnapshotFinalizer finalizer = new OperatorSnapshotFinalizer(futures);

    for (Future<?> future : futures.getAllFutures()) {
        assertTrue(future.isDone());
    }

    // Map each input SnapshotResult to the accessor that should expose it on the output state.
    final Map<SnapshotResult<?>, Function<OperatorSubtaskState, ? extends StateObject>>
            extractors = new HashMap<>();
    extractors.put(managedKeyed, headExtractor(OperatorSubtaskState::getManagedKeyedState));
    extractors.put(rawKeyed, headExtractor(OperatorSubtaskState::getRawKeyedState));
    extractors.put(managedOperator, headExtractor(OperatorSubtaskState::getManagedOperatorState));
    extractors.put(rawOperator, headExtractor(OperatorSubtaskState::getRawOperatorState));
    extractors.put(inputChannel, OperatorSubtaskState::getInputChannelState);
    extractors.put(resultSubpartition, OperatorSubtaskState::getResultSubpartitionState);

    // Job-manager-owned halves must surface in the job-manager-owned state …
    for (Map.Entry<SnapshotResult<?>, Function<OperatorSubtaskState, ? extends StateObject>>
            entry : extractors.entrySet()) {
        assertEquals(
                entry.getKey().getJobManagerOwnedSnapshot(),
                entry.getValue().apply(finalizer.getJobManagerOwnedState()));
    }
    // … and task-local halves in the task-local state.
    for (Map.Entry<SnapshotResult<?>, Function<OperatorSubtaskState, ? extends StateObject>>
            entry : extractors.entrySet()) {
        assertEquals(
                entry.getKey().getTaskLocalSnapshot(),
                entry.getValue().apply(finalizer.getTaskLocalState()));
    }
}
Example usage of org.apache.flink.runtime.state.StateObject in the Apache Flink project: method testSerializationOrDiscardWithDiscardFailure of class StateHandleStoreUtilsTest.
/**
 * When serialization fails AND the subsequent discard also fails, the discard failure must be
 * attached to the thrown {@link IOException} as a suppressed exception.
 */
@Test
public void testSerializationOrDiscardWithDiscardFailure() throws Exception {
    final Exception discardFailure =
            new IllegalStateException("Expected IllegalStateException that should be suppressed.");
    final StateObject stateObject =
            new FailingSerializationStateObject(
                    () -> {
                        throw discardFailure;
                    });

    try {
        StateHandleStoreUtils.serializeOrDiscard(stateObject);
        fail("An IOException is expected to be thrown.");
    } catch (IOException expected) {
        // Serialization failed as intended; the discard failure rides along as suppressed.
        assertThat(expected.getSuppressed().length, is(1));
        assertThat(expected.getSuppressed()[0], is(discardFailure));
    }
}
Example usage of org.apache.flink.runtime.state.StateObject in the Apache Flink project: method testSerializeOrDiscardFailureHandling of class StateHandleStoreUtilsTest.
/**
 * When serialization fails, {@code serializeOrDiscard} must discard the state object and then
 * surface the failure as an {@link IOException}.
 */
@Test
public void testSerializeOrDiscardFailureHandling() throws Exception {
    final AtomicBoolean discarded = new AtomicBoolean(false);
    final StateObject stateObject =
            new FailingSerializationStateObject(() -> discarded.set(true));

    try {
        StateHandleStoreUtils.serializeOrDiscard(stateObject);
        fail("An IOException is expected to be thrown.");
    } catch (IOException expected) {
        // Serialization failure is the expected outcome.
    }

    // The failed handle must have been discarded as part of the error handling.
    assertThat(discarded.get(), is(true));
}
Example usage of org.apache.flink.runtime.state.StateObject in the Apache Flink project: method cancel of class OperatorSnapshotFutures.
/**
 * Cancels all snapshot futures and discards any state they may already have produced.
 *
 * <p>All futures are cancelled even if cancelling one of them fails: each cancellation is
 * registered with a {@link Closer}, which runs every registered action and rethrows the first
 * failure with later failures suppressed.
 *
 * @return discarded state size (if available).
 * @throws Exception if cancelling/discarding any of the state futures fails.
 */
public Tuple2<Long, Long> cancel() throws Exception {
    // Pair each future with the human-readable label used in cancellation error messages.
    // BUGFIX: the labels for the raw keyed and managed operator futures were swapped
    // (raw keyed was reported as "managed operator" and managed operator as "raw keyed"),
    // which made cancellation failures point at the wrong state future.
    List<Tuple2<Future<? extends StateObject>, String>> pairs = new ArrayList<>();
    pairs.add(new Tuple2<>(getKeyedStateManagedFuture(), "managed keyed"));
    pairs.add(new Tuple2<>(getKeyedStateRawFuture(), "raw keyed"));
    pairs.add(new Tuple2<>(getOperatorStateManagedFuture(), "managed operator"));
    pairs.add(new Tuple2<>(getOperatorStateRawFuture(), "raw operator"));
    pairs.add(new Tuple2<>(getInputChannelStateFuture(), "input channel"));
    pairs.add(new Tuple2<>(getResultSubpartitionStateFuture(), "result subpartition"));
    // Accumulates the two size components summed over all discarded futures.
    // NOTE(review): presumably (checkpointed size, full size) — confirm against
    // discardStateFuture's contract.
    final long[] sizeTuple = new long[2];
    try (Closer closer = Closer.create()) {
        for (Tuple2<Future<? extends StateObject>, String> pair : pairs) {
            closer.register(
                    () -> {
                        try {
                            Tuple2<Long, Long> tuple = discardStateFuture(pair.f0);
                            sizeTuple[0] += tuple.f0;
                            sizeTuple[1] += tuple.f1;
                        } catch (Exception e) {
                            throw new RuntimeException(
                                    String.format(
                                            "Could not properly cancel %s state future", pair.f1),
                                    e);
                        }
                    });
        }
    }
    return Tuple2.of(sizeTuple[0], sizeTuple[1]);
}
Aggregations