Use of org.apache.flink.runtime.state.hashmap.HashMapStateBackend in project flink by apache.
From the class StateBootstrapTransformationTest, method testOperatorSpecificMaxParallelismRespected:
@Test
public void testOperatorSpecificMaxParallelismRespected() {
    StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
    env.setParallelism(4);

    DataStream<Integer> input = env.fromElements(0);

    StateBootstrapTransformation<Integer> transformation =
            OperatorTransformation.bootstrapWith(input)
                    .setMaxParallelism(1)
                    .transform(new ExampleStateBootstrapFunction());

    int maxParallelism = transformation.getMaxParallelism(4);
    DataStream<TaggedOperatorSubtaskState> result =
            transformation.writeOperatorSubtaskStates(
                    OperatorIDGenerator.fromUid("uid"),
                    new HashMapStateBackend(),
                    new Path(),
                    maxParallelism);

    Assert.assertEquals(
            "The parallelism of a data set should be constrained by the savepoint max parallelism",
            1,
            result.getParallelism());
}
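The ExampleStateBootstrapFunction referenced above is defined elsewhere in the test suite. A minimal sketch of what a StateBootstrapFunction of this shape can look like, modeled on the pattern in the Flink documentation; the state name "state" and the body are assumptions, not the actual test fixture:

import org.apache.flink.api.common.state.ListState;
import org.apache.flink.api.common.state.ListStateDescriptor;
import org.apache.flink.api.common.typeinfo.Types;
import org.apache.flink.runtime.state.FunctionInitializationContext;
import org.apache.flink.runtime.state.FunctionSnapshotContext;
import org.apache.flink.state.api.functions.StateBootstrapFunction;

// Hypothetical body for ExampleStateBootstrapFunction: writes each
// incoming element into operator (non-keyed) list state.
public class ExampleStateBootstrapFunction extends StateBootstrapFunction<Integer> {

    private ListState<Integer> state;

    @Override
    public void processElement(Integer value, Context ctx) throws Exception {
        state.add(value);
    }

    @Override
    public void snapshotState(FunctionSnapshotContext context) throws Exception {
        // Nothing to do here; the state backend snapshots the list state itself.
    }

    @Override
    public void initializeState(FunctionInitializationContext context) throws Exception {
        // "state" is an assumed state name for this sketch.
        state = context.getOperatorStateStore()
                .getListState(new ListStateDescriptor<>("state", Types.INT));
    }
}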
Use of org.apache.flink.runtime.state.hashmap.HashMapStateBackend in project flink by apache.
From the class StateBootstrapTransformationTest, method testStreamConfig:
@Test
public void testStreamConfig() {
    StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
    DataStream<String> input = env.fromElements("");

    StateBootstrapTransformation<String> transformation =
            OperatorTransformation.bootstrapWith(input)
                    .keyBy(new CustomKeySelector())
                    .transform(new ExampleKeyedStateBootstrapFunction());

    StreamConfig config =
            transformation.getConfig(
                    OperatorIDGenerator.fromUid("uid"),
                    new HashMapStateBackend(),
                    new Configuration(),
                    null);

    KeySelector selector =
            config.getStatePartitioner(0, Thread.currentThread().getContextClassLoader());
    Assert.assertEquals(
            "Incorrect key selector forwarded to stream operator",
            CustomKeySelector.class,
            selector.getClass());
}
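CustomKeySelector is likewise defined elsewhere; the test only checks that its class is forwarded into the StreamConfig as the state partitioner. A minimal sketch of a compatible selector, where the identity keying is an assumption:

import org.apache.flink.api.java.functions.KeySelector;

// Hypothetical body for CustomKeySelector: maps each record to the
// key under which its state is partitioned (identity key here).
public class CustomKeySelector implements KeySelector<String, String> {

    @Override
    public String getKey(String value) {
        return value;
    }
}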
Use of org.apache.flink.runtime.state.hashmap.HashMapStateBackend in project flink by apache.
From the class StateBootstrapTransformationTest, method testDefaultParallelismRespectedWhenLessThanMaxParallelism:
@Test
public void testDefaultParallelismRespectedWhenLessThanMaxParallelism() {
    StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
    env.setParallelism(4);

    DataStream<Integer> input = env.fromElements(0);

    StateBootstrapTransformation<Integer> transformation =
            OperatorTransformation.bootstrapWith(input)
                    .transform(new ExampleStateBootstrapFunction());

    int maxParallelism = transformation.getMaxParallelism(10);
    DataStream<TaggedOperatorSubtaskState> result =
            transformation.writeOperatorSubtaskStates(
                    OperatorIDGenerator.fromUid("uid"),
                    new HashMapStateBackend(),
                    new Path(),
                    maxParallelism);

    Assert.assertEquals(
            "The parallelism of a data set should not change when less than the max parallelism of the savepoint",
            env.getParallelism(),
            result.getParallelism());
}
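In application code, a StateBootstrapTransformation like the ones above is usually handed to a savepoint writer rather than written out directly. A rough sketch, assuming the state processor API's SavepointWriter (Flink 1.15+); the uid, max parallelism, and target path are illustrative, and the exact factory signatures vary across releases:

import org.apache.flink.runtime.state.hashmap.HashMapStateBackend;
import org.apache.flink.state.api.SavepointWriter;

// Attach the bootstrap transformation under the operator uid "uid"
// and write a new savepoint with max parallelism 128; both values
// and the path are illustrative only.
SavepointWriter.newSavepoint(new HashMapStateBackend(), 128)
        .withOperator("uid", transformation)
        .write("file:///tmp/new-savepoint");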
Use of org.apache.flink.runtime.state.hashmap.HashMapStateBackend in project flink by apache.
From the class SavepointReaderITTestBase, method verifyListState:
private void verifyListState(String path, StreamExecutionEnvironment env) throws Exception {
    SavepointReader savepoint = SavepointReader.read(env, path, new HashMapStateBackend());

    List<Integer> listResult = JobResultRetriever.collect(readListState(savepoint));
    listResult.sort(Comparator.naturalOrder());

    Assert.assertEquals(
            "Unexpected elements read from list state",
            SavepointSource.getElements(),
            listResult);
}
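The readListState helper is defined elsewhere in SavepointReaderITTestBase. A plausible sketch, assuming the list state was registered under the operator uid "uid" and state name "list" (both hypothetical for this sketch):

import java.io.IOException;
import org.apache.flink.api.common.typeinfo.Types;
import org.apache.flink.state.api.SavepointReader;
import org.apache.flink.streaming.api.datastream.DataStream;

// Reads the Integer list state back out of the savepoint; the uid
// and state name are assumptions, not the actual test fixture's.
private DataStream<Integer> readListState(SavepointReader savepoint) throws IOException {
    return savepoint.readListState("uid", "list", Types.INT);
}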
Use of org.apache.flink.runtime.state.hashmap.HashMapStateBackend in project flink by apache.
From the class ChangelogStateBackendLoadingTest, method testLoadingDefault:
@Test
public void testLoadingDefault() throws Exception {
    final StateBackend backend =
            StateBackendLoader.fromApplicationOrConfigOrDefault(
                    null, TernaryBoolean.UNDEFINED, config(), cl, null);
    final CheckpointStorage storage =
            CheckpointStorageLoader.load(null, null, backend, config(), cl, null);

    assertTrue(backend instanceof HashMapStateBackend);
}
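This test verifies that HashMapStateBackend is the fallback when neither the application nor the configuration selects a backend. In a job, the same backend can be chosen explicitly instead of relying on that default; a minimal sketch:

import org.apache.flink.runtime.state.hashmap.HashMapStateBackend;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;

// Selecting the backend programmatically takes precedence over both
// the built-in default and any value set in the cluster configuration.
StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
env.setStateBackend(new HashMapStateBackend());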