Use of org.apache.flink.runtime.state.CompletedCheckpointStorageLocation in the Apache Flink project.
From the class CheckpointMetadataLoadingTest, method testUnmatchedCoordinatorOnlyStateFails.
/**
 * Tests that savepoint loading fails when there is non-restored coordinator state only, and
 * non-restored state is not allowed.
 */
@Test
public void testUnmatchedCoordinatorOnlyStateFails() throws Exception {
    final int maxParallelism = 1234;
    final OperatorID coordinatorOperatorId = new OperatorID();

    // Operator state that carries coordinator state but no subtask state.
    final OperatorState coordinatorOnlyState =
            new OperatorState(coordinatorOperatorId, maxParallelism / 2, maxParallelism);
    coordinatorOnlyState.setCoordinatorState(
            new ByteStreamStateHandle("coordinatorState", new byte[0]));

    final CompletedCheckpointStorageLocation savepointLocation =
            createSavepointWithOperatorState(42L, coordinatorOnlyState);

    // No execution vertices at all, so the coordinator state cannot be matched to any task.
    final Map<JobVertexID, ExecutionJobVertex> vertices = Collections.emptyMap();

    final CheckpointProperties savepointProperties =
            CheckpointProperties.forSavepoint(false, SavepointFormatType.CANONICAL);
    try {
        Checkpoints.loadAndValidateCheckpoint(
                new JobID(),
                vertices,
                savepointLocation,
                cl,
                false, // allowNonRestoredState
                savepointProperties,
                RestoreMode.NO_CLAIM);
        fail("Did not throw expected Exception");
    } catch (IllegalStateException expected) {
        assertTrue(expected.getMessage().contains("allowNonRestoredState"));
    }
}
Use of org.apache.flink.runtime.state.CompletedCheckpointStorageLocation in the Apache Flink project.
From the class CheckpointMetadataLoadingTest, method testMaxParallelismMismatch.
/**
 * Tests that savepoint loading fails when there is a max-parallelism mismatch.
 */
@Test
public void testMaxParallelismMismatch() throws Exception {
    final int parallelism = 128128;
    final OperatorID operatorId = new OperatorID();

    final CompletedCheckpointStorageLocation savepointLocation =
            createSavepointWithOperatorSubtaskState(242L, operatorId, parallelism);

    // Vertex max parallelism (parallelism + 1) deliberately disagrees with the savepoint's.
    final Map<JobVertexID, ExecutionJobVertex> vertices =
            createTasks(operatorId, parallelism, parallelism + 1);

    final CheckpointProperties savepointProperties =
            CheckpointProperties.forSavepoint(false, SavepointFormatType.CANONICAL);
    try {
        Checkpoints.loadAndValidateCheckpoint(
                new JobID(),
                vertices,
                savepointLocation,
                cl,
                false, // allowNonRestoredState
                savepointProperties,
                RestoreMode.NO_CLAIM);
        fail("Did not throw expected Exception");
    } catch (IllegalStateException expected) {
        assertTrue(expected.getMessage().contains("Max parallelism mismatch"));
    }
}
Use of org.apache.flink.runtime.state.CompletedCheckpointStorageLocation in the Apache Flink project.
From the class CheckpointMetadataLoadingTest, method testNonRestoredStateWhenAllowed.
/**
 * Tests that savepoint loading succeeds when there is non-restored state and non-restored
 * state IS allowed; the unmatched operator state is simply dropped from the result.
 */
@Test
public void testNonRestoredStateWhenAllowed() throws Exception {
    final OperatorID operatorId = new OperatorID();
    final int parallelism = 9;

    final CompletedCheckpointStorageLocation testSavepoint =
            createSavepointWithOperatorSubtaskState(242L, operatorId, parallelism);

    // No tasks to match the savepoint's operator state against.
    final Map<JobVertexID, ExecutionJobVertex> tasks = Collections.emptyMap();

    // allowNonRestoredState = true: loading must succeed despite the unmatched state.
    final CompletedCheckpoint loaded =
            Checkpoints.loadAndValidateCheckpoint(
                    new JobID(),
                    tasks,
                    testSavepoint,
                    cl,
                    true, // allowNonRestoredState
                    CheckpointProperties.forSavepoint(false, SavepointFormatType.CANONICAL),
                    RestoreMode.NO_CLAIM);

    // The unmatched (non-restored) operator state must have been discarded.
    assertTrue(loaded.getOperatorStates().isEmpty());
}
Use of org.apache.flink.runtime.state.CompletedCheckpointStorageLocation in the Apache Flink project.
From the class CheckpointMetadataLoadingTest, method testNonRestoredStateWhenDisallowed.
/**
 * Tests that savepoint loading fails when there is non-restored state, but it is not allowed.
 */
@Test
public void testNonRestoredStateWhenDisallowed() throws Exception {
    final int parallelism = 9;
    final OperatorID operatorId = new OperatorID();

    final CompletedCheckpointStorageLocation savepointLocation =
            createSavepointWithOperatorSubtaskState(242L, operatorId, parallelism);

    // No tasks to match the savepoint's operator state against.
    final Map<JobVertexID, ExecutionJobVertex> vertices = Collections.emptyMap();

    final CheckpointProperties savepointProperties =
            CheckpointProperties.forSavepoint(false, SavepointFormatType.CANONICAL);
    try {
        Checkpoints.loadAndValidateCheckpoint(
                new JobID(),
                vertices,
                savepointLocation,
                cl,
                false, // allowNonRestoredState
                savepointProperties,
                RestoreMode.NO_CLAIM);
        fail("Did not throw expected Exception");
    } catch (IllegalStateException expected) {
        assertTrue(expected.getMessage().contains("allowNonRestoredState"));
    }
}
Aggregations