Use of org.apache.flink.runtime.operators.testutils.ExpectedTestException in project flink by apache.
In the class ZooKeeperCompletedCheckpointStoreTest, the method testRecoverFailsIfDownloadFails:
@Test
public void testRecoverFailsIfDownloadFails() {
    final Configuration configuration = new Configuration();
    configuration.setString(
            HighAvailabilityOptions.HA_ZOOKEEPER_QUORUM, zooKeeperResource.getConnectString());
    final List<Tuple2<RetrievableStateHandle<CompletedCheckpoint>, String>> checkpointsInZk = new ArrayList<>();
    final ZooKeeperStateHandleStore<CompletedCheckpoint> checkpointsInZooKeeper =
            new ZooKeeperStateHandleStore<CompletedCheckpoint>(
                    ZooKeeperUtils.startCuratorFramework(configuration, NoOpFatalErrorHandler.INSTANCE)
                            .asCuratorFramework(),
                    new TestingRetrievableStateStorageHelper<>()) {
                @Override
                public List<Tuple2<RetrievableStateHandle<CompletedCheckpoint>, String>> getAllAndLock() {
                    return checkpointsInZk;
                }
            };
    checkpointsInZk.add(
            createHandle(1, id -> {
                throw new ExpectedTestException();
            }));
    final Exception exception =
            assertThrows(
                    Exception.class,
                    () -> DefaultCompletedCheckpointStoreUtils.retrieveCompletedCheckpoints(
                            checkpointsInZooKeeper, zooKeeperCheckpointStoreUtil));
    assertThat(exception, FlinkMatchers.containsCause(ExpectedTestException.class));
}
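All of the snippets on this page throw or expect ExpectedTestException, an unchecked exception that Flink's test utilities use to mark deliberately injected failures. A minimal sketch of what such a class looks like (the exact message text in Flink's version may differ) is:

// Sketch only: the real class lives in org.apache.flink.runtime.operators.testutils.
public class ExpectedTestException extends RuntimeException {

    private static final long serialVersionUID = 1L;

    // A well-known message so tests can also match on the message text,
    // as StreamOperatorStateHandlerTest does below via ExpectedTestException.MESSAGE.
    public static final String MESSAGE = "Expected test exception";

    public ExpectedTestException() {
        super(MESSAGE);
    }
}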
Use of org.apache.flink.runtime.operators.testutils.ExpectedTestException in project flink by apache.
In the class StreamOperatorStateHandlerTest, the method testFailingBackendSnapshotMethod:
/**
 * Tests that a failing snapshot method call to the keyed state backend will trigger the closing
 * of the StateSnapshotContextSynchronousImpl and the cancellation of the
 * OperatorSnapshotFutures. The latter is supposed to also cancel all assigned futures.
 */
@Test
public void testFailingBackendSnapshotMethod() throws Exception {
    final long checkpointId = 42L;
    final long timestamp = 1L;
    try (CloseableRegistry closeableRegistry = new CloseableRegistry()) {
        RunnableFuture<SnapshotResult<KeyedStateHandle>> keyedStateManagedFuture = new CancelableFuture<>();
        RunnableFuture<SnapshotResult<KeyedStateHandle>> keyedStateRawFuture = new CancelableFuture<>();
        RunnableFuture<SnapshotResult<OperatorStateHandle>> operatorStateManagedFuture = new CancelableFuture<>();
        RunnableFuture<SnapshotResult<OperatorStateHandle>> operatorStateRawFuture = new CancelableFuture<>();
        RunnableFuture<SnapshotResult<StateObjectCollection<InputChannelStateHandle>>> inputChannelStateFuture = new CancelableFuture<>();
        RunnableFuture<SnapshotResult<StateObjectCollection<ResultSubpartitionStateHandle>>> resultSubpartitionStateFuture = new CancelableFuture<>();
        OperatorSnapshotFutures operatorSnapshotResult =
                new OperatorSnapshotFutures(
                        keyedStateManagedFuture,
                        keyedStateRawFuture,
                        operatorStateManagedFuture,
                        operatorStateRawFuture,
                        inputChannelStateFuture,
                        resultSubpartitionStateFuture);
        StateSnapshotContextSynchronousImpl context =
                new TestStateSnapshotContextSynchronousImpl(checkpointId, timestamp, closeableRegistry);
        context.getRawKeyedOperatorStateOutput();
        context.getRawOperatorStateOutput();
        StreamTaskStateInitializerImpl stateInitializer =
                new StreamTaskStateInitializerImpl(new MockEnvironmentBuilder().build(), new MemoryStateBackend());
        StreamOperatorStateContext stateContext =
                stateInitializer.streamOperatorStateContext(
                        new OperatorID(),
                        "whatever",
                        new TestProcessingTimeService(),
                        new UnUsedKeyContext(),
                        IntSerializer.INSTANCE,
                        closeableRegistry,
                        new InterceptingOperatorMetricGroup(),
                        1.0,
                        false);
        StreamOperatorStateHandler stateHandler =
                new StreamOperatorStateHandler(stateContext, new ExecutionConfig(), closeableRegistry);
        final String keyedStateField = "keyedStateField";
        final String operatorStateField = "operatorStateField";
        CheckpointedStreamOperator checkpointedStreamOperator =
                new CheckpointedStreamOperator() {

                    @Override
                    public void initializeState(StateInitializationContext context) throws Exception {
                        context.getKeyedStateStore()
                                .getState(new ValueStateDescriptor<>(keyedStateField, LongSerializer.INSTANCE))
                                .update(42L);
                        context.getOperatorStateStore()
                                .getListState(new ListStateDescriptor<>(operatorStateField, LongSerializer.INSTANCE))
                                .add(42L);
                    }

                    @Override
                    public void snapshotState(StateSnapshotContext context) throws Exception {
                        throw new ExpectedTestException();
                    }
                };
        stateHandler.setCurrentKey("44");
        stateHandler.initializeOperatorState(checkpointedStreamOperator);
        assertThat(stateContext.operatorStateBackend().getRegisteredStateNames(), is(not(empty())));
        assertThat(
                ((AbstractKeyedStateBackend<?>) stateContext.keyedStateBackend()).numKeyValueStatesByName(),
                equalTo(1));
        try {
            stateHandler.snapshotState(
                    checkpointedStreamOperator,
                    Optional.of(stateContext.internalTimerServiceManager()),
                    "42",
                    42,
                    42,
                    CheckpointOptions.forCheckpointWithDefaultLocation(),
                    new MemCheckpointStreamFactory(1024),
                    operatorSnapshotResult,
                    context,
                    false);
            fail("Exception expected.");
        } catch (CheckpointException e) {
            // the CheckpointException wraps the cause in a SerializedThrowable
            if (!ExceptionUtils.findThrowableWithMessage(e, ExpectedTestException.MESSAGE).isPresent()) {
                throw e;
            }
        }
        assertTrue(keyedStateManagedFuture.isCancelled());
        assertTrue(keyedStateRawFuture.isCancelled());
        assertTrue(context.getKeyedStateStreamFuture().isCancelled());
        assertTrue(operatorStateManagedFuture.isCancelled());
        assertTrue(operatorStateRawFuture.isCancelled());
        assertTrue(context.getOperatorStateStreamFuture().isCancelled());
        assertTrue(inputChannelStateFuture.isCancelled());
        assertTrue(resultSubpartitionStateFuture.isCancelled());
        stateHandler.dispose();
        assertThat(stateContext.operatorStateBackend().getRegisteredBroadcastStateNames(), is(empty()));
        assertThat(stateContext.operatorStateBackend().getRegisteredStateNames(), is(empty()));
        assertThat(
                ((AbstractKeyedStateBackend<?>) stateContext.keyedStateBackend()).numKeyValueStatesByName(),
                equalTo(0));
    }
}
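The CancelableFuture used above is a helper defined elsewhere in the test class and not shown in the snippet. A hypothetical minimal stand-in, assuming all the test needs is a RunnableFuture that never completes and merely reports whether cancel() was called, could look like this:

import java.util.concurrent.FutureTask;

// Hypothetical stand-in: FutureTask already implements RunnableFuture and
// reports isCancelled() == true once cancel() is called before it ever runs.
class CancelableFuture<T> extends FutureTask<T> {

    CancelableFuture() {
        // The callable never runs in the failure scenario; the test only
        // inspects isCancelled() after the snapshot call has failed.
        super(() -> null);
    }
}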
Use of org.apache.flink.runtime.operators.testutils.ExpectedTestException in project flink by apache.
In the class CoGroupTaskTest, the method testFailingSortCoGroupTask:
@Test
public void testFailingSortCoGroupTask() {
    int keyCnt1 = 100;
    int valCnt1 = 2;
    int keyCnt2 = 200;
    int valCnt2 = 1;
    setOutput(this.output);
    addInput(new UniformRecordGenerator(keyCnt1, valCnt1, true));
    addInput(new UniformRecordGenerator(keyCnt2, valCnt2, true));
    addDriverComparator(this.comparator1);
    addDriverComparator(this.comparator2);
    getTaskConfig().setDriverPairComparator(RecordPairComparatorFactory.get());
    getTaskConfig().setDriverStrategy(DriverStrategy.CO_GROUP);
    final CoGroupDriver<Record, Record, Record> testTask = new CoGroupDriver<Record, Record, Record>();
    try {
        testDriver(testTask, MockFailingCoGroupStub.class);
        Assert.fail("Function exception was not forwarded.");
    } catch (ExpectedTestException etex) {
        // good!
    } catch (Exception e) {
        e.printStackTrace();
        Assert.fail("The test caused an exception.");
    }
}
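MockFailingCoGroupStub comes from Flink's operator test utilities and is not shown here; its exact behavior is an assumption in the sketch below, which fails after a fixed number of groups so the driver is already running when the exception is forwarded:

import org.apache.flink.api.common.functions.CoGroupFunction;
import org.apache.flink.runtime.operators.testutils.ExpectedTestException;
import org.apache.flink.types.Record;
import org.apache.flink.util.Collector;

// Hypothetical sketch of a failing co-group stub.
public class MockFailingCoGroupStub implements CoGroupFunction<Record, Record, Record> {

    private int groups;

    @Override
    public void coGroup(Iterable<Record> first, Iterable<Record> second, Collector<Record> out) {
        // Deliberately fail once a few groups have been processed, so the test
        // can assert that the driver forwards the ExpectedTestException.
        if (++groups >= 10) {
            throw new ExpectedTestException();
        }
        for (Record record : first) {
            out.collect(record);
        }
    }
}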
Use of org.apache.flink.runtime.operators.testutils.ExpectedTestException in project flink by apache.
In the class CrossTaskTest, the method testFailingBlockCrossTask2:
@Test
public void testFailingBlockCrossTask2() {
    int keyCnt1 = 10;
    int valCnt1 = 1;
    int keyCnt2 = 100;
    int valCnt2 = 4;
    setOutput(this.output);
    addInput(new UniformRecordGenerator(keyCnt1, valCnt1, false));
    addInput(new UniformRecordGenerator(keyCnt2, valCnt2, false));
    getTaskConfig().setDriverStrategy(DriverStrategy.NESTEDLOOP_BLOCKED_OUTER_SECOND);
    getTaskConfig().setRelativeMemoryDriver(cross_frac);
    final CrossDriver<Record, Record, Record> testTask = new CrossDriver<>();
    try {
        testDriver(testTask, MockFailingCrossStub.class);
        Assert.fail("Exception not forwarded.");
    } catch (ExpectedTestException etex) {
        // good!
    } catch (Exception e) {
        e.printStackTrace();
        Assert.fail("Test failed due to an exception.");
    }
}
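The try/fail/catch idiom in the two driver tests above predates JUnit's assertThrows. As a sketch, assuming JUnit 4.13+ on the classpath and leaving the surrounding harness methods unchanged, the same check could be written in the style of the first snippet on this page:

import static org.junit.Assert.assertThrows;

// Sketch: equivalent assertion, letting JUnit verify that the driver
// forwards the ExpectedTestException thrown by the stub.
final CrossDriver<Record, Record, Record> testTask = new CrossDriver<>();
assertThrows(ExpectedTestException.class, () -> testDriver(testTask, MockFailingCrossStub.class));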
Use of org.apache.flink.runtime.operators.testutils.ExpectedTestException in project flink by apache.
In the class RemoteInputChannelTest, the method testExceptionOnPersisting:
@Test
public void testExceptionOnPersisting() throws Exception {
    // Setup
    final SingleInputGate inputGate = createSingleInputGate(1);
    final RemoteInputChannel inputChannel =
            InputChannelBuilder.newBuilder()
                    .setStateWriter(
                            new ChannelStateWriter.NoOpChannelStateWriter() {
                                @Override
                                public void addInputData(
                                        long checkpointId,
                                        InputChannelInfo info,
                                        int startSeqNum,
                                        CloseableIterator<Buffer> data) {
                                    try {
                                        data.close();
                                    } catch (Exception e) {
                                        throw new RuntimeException(e);
                                    }
                                    throw new ExpectedTestException();
                                }
                            })
                    .buildRemoteChannel(inputGate);
    inputChannel.checkpointStarted(
            new CheckpointBarrier(
                    42,
                    System.currentTimeMillis(),
                    CheckpointOptions.unaligned(CheckpointType.CHECKPOINT, getDefault())));
    final Buffer buffer = createBuffer(TestBufferFactory.BUFFER_SIZE);
    assertFalse(buffer.isRecycled());
    try {
        inputChannel.onBuffer(buffer, 0, -1);
        fail("This should have failed");
    } catch (ExpectedTestException ex) {
        // ignore
    }
    // This check is not strictly necessary. Generally speaking, if an exception happens
    // during persisting, there are two potentially correct outcomes:
    // 1. The buffer is recycled only once, in the #onBuffer call when handling the exception.
    // 2. The buffer is stored inside the RemoteInputChannel and recycled on releaseAllResources.
    // What is not acceptable is that it gets released twice, in both places. Without the check
    // below, we would just be relying on the Buffer throwing IllegalReferenceCountException.
    // The check freezes the current implementation, which is unlikely to change; on the other
    // hand, thanks to it we do not need to rely on IllegalReferenceCountException being thrown
    // from the Buffer.
    //
    // In other words, if you end up reading this after refactoring RemoteInputChannel, it might
    // be safe to remove this assertion. Just make sure double recycling of the same buffer
    // still throws IllegalReferenceCountException.
    assertFalse(buffer.isRecycled());
    inputChannel.releaseAllResources();
    assertTrue(buffer.isRecycled());
}
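The guarantee that the long comment relies on, namely that recycling the same buffer twice fails loudly, can be checked in isolation. A sketch, assuming Flink's NetworkBuffer and FreeingBufferRecycler and the shaded Netty IllegalReferenceCountException (the package paths here are assumptions):

import static org.junit.Assert.assertThrows;
import static org.junit.Assert.assertTrue;

import org.apache.flink.core.memory.MemorySegmentFactory;
import org.apache.flink.runtime.io.network.buffer.FreeingBufferRecycler;
import org.apache.flink.runtime.io.network.buffer.NetworkBuffer;
import org.apache.flink.shaded.netty4.io.netty.util.IllegalReferenceCountException;

NetworkBuffer buffer =
        new NetworkBuffer(
                MemorySegmentFactory.allocateUnpooledSegment(1024), FreeingBufferRecycler.INSTANCE);

// First recycle: the reference count drops to zero and the buffer reports isRecycled().
buffer.recycleBuffer();
assertTrue(buffer.isRecycled());

// Second recycle: must fail, which is what makes a double release in both
// #onBuffer and releaseAllResources detectable in the first place.
assertThrows(IllegalReferenceCountException.class, buffer::recycleBuffer);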