Use of org.apache.flink.runtime.execution.Environment in project flink by apache.
The class StreamOperatorChainingTest, method testMultiChaining.
/**
 * Verify that multi-chaining works.
 */
private void testMultiChaining(StreamExecutionEnvironment env) throws Exception {
    // set parallelism to 2 to avoid chaining with the source when only one processor is available
    env.setParallelism(2);
    // the actual elements will not be used
    DataStream<Integer> input = env.fromElements(1, 2, 3);

    sink1Results = new ArrayList<>();
    sink2Results = new ArrayList<>();

    input = input.map(value -> value);

    input.map(value -> "First: " + value)
            .addSink(new SinkFunction<String>() {
                @Override
                public void invoke(String value, Context ctx) throws Exception {
                    sink1Results.add(value);
                }
            });

    input.map(value -> "Second: " + value)
            .addSink(new SinkFunction<String>() {
                @Override
                public void invoke(String value, Context ctx) throws Exception {
                    sink2Results.add(value);
                }
            });
    // we build our own StreamTask and OperatorChain
    JobGraph jobGraph = env.getStreamGraph().getJobGraph();

    Assert.assertTrue(jobGraph.getVerticesSortedTopologicallyFromSources().size() == 2);

    JobVertex chainedVertex = jobGraph.getVerticesSortedTopologicallyFromSources().get(1);
    Configuration configuration = chainedVertex.getConfiguration();

    StreamConfig streamConfig = new StreamConfig(configuration);
    StreamMap<Integer, Integer> headOperator =
            streamConfig.getStreamOperator(Thread.currentThread().getContextClassLoader());

    try (MockEnvironment environment = createMockEnvironment(chainedVertex.getName())) {
        StreamTask<Integer, StreamMap<Integer, Integer>> mockTask =
                createMockTask(streamConfig, environment);
        OperatorChain<Integer, StreamMap<Integer, Integer>> operatorChain =
                createOperatorChain(streamConfig, environment, mockTask);

        headOperator.setup(mockTask, streamConfig, operatorChain.getMainOperatorOutput());

        operatorChain.initializeStateAndOpenOperators(null);

        headOperator.processElement(new StreamRecord<>(1));
        headOperator.processElement(new StreamRecord<>(2));
        headOperator.processElement(new StreamRecord<>(3));

        assertThat(sink1Results, contains("First: 1", "First: 2", "First: 3"));
        assertThat(sink2Results, contains("Second: 1", "Second: 2", "Second: 3"));
    }
}
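
The test above drives the chained vertex through a createMockEnvironment helper that is not shown on this page. A minimal sketch of such a helper, built on Flink's MockEnvironmentBuilder, could look as follows; the specific builder settings are illustrative assumptions, not necessarily the exact values used by StreamOperatorChainingTest.

private MockEnvironment createMockEnvironment(String taskName) {
    // MockEnvironmentBuilder assembles an Environment that can host operators
    // outside of a real TaskManager; only the task name is set in this sketch.
    return new MockEnvironmentBuilder()
            .setTaskName(taskName)
            .build();
}
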
Use of org.apache.flink.runtime.execution.Environment in project flink by apache.
The class StreamTaskSystemExitTest, method testCancelSystemExitStreamTask.
@Test(expected = UserSystemExitException.class)
public void testCancelSystemExitStreamTask() throws Exception {
    Environment mockEnvironment = new MockEnvironmentBuilder().build();
    SystemExitStreamTask systemExitStreamTask =
            new SystemExitStreamTask(mockEnvironment, SystemExitStreamTask.ExitPoint.CANCEL);
    systemExitStreamTask.cancel();
}
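
SystemExitStreamTask is an inner helper class of StreamTaskSystemExitTest that calls System.exit() at a configured exit point; the test setup intercepts such forbidden exit calls and rethrows them as UserSystemExitException. The same pattern covers the other exit points of the helper. As a hedged sketch, a companion test for the invoke path could look like the following, assuming the helper also defines an ExitPoint.INVOKE constant (not shown on this page):

@Test(expected = UserSystemExitException.class)
public void testInvokeSystemExitStreamTask() throws Exception {
    // ExitPoint.INVOKE is assumed here; the page above only shows the CANCEL variant.
    Environment mockEnvironment = new MockEnvironmentBuilder().build();
    SystemExitStreamTask systemExitStreamTask =
            new SystemExitStreamTask(mockEnvironment, SystemExitStreamTask.ExitPoint.INVOKE);
    systemExitStreamTask.invoke();
}
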
Use of org.apache.flink.runtime.execution.Environment in project flink by apache.
The class StreamTaskStateInitializerImplTest, method testWithRestore.
@SuppressWarnings("unchecked")
@Test
public void testWithRestore() throws Exception {
    StateBackend mockingBackend = spy(new StateBackend() {

        @Override
        public <K> AbstractKeyedStateBackend<K> createKeyedStateBackend(
                Environment env,
                JobID jobID,
                String operatorIdentifier,
                TypeSerializer<K> keySerializer,
                int numberOfKeyGroups,
                KeyGroupRange keyGroupRange,
                TaskKvStateRegistry kvStateRegistry,
                TtlTimeProvider ttlTimeProvider,
                MetricGroup metricGroup,
                @Nonnull Collection<KeyedStateHandle> stateHandles,
                CloseableRegistry cancelStreamRegistry) throws Exception {
            return mock(AbstractKeyedStateBackend.class);
        }

        @Override
        public OperatorStateBackend createOperatorStateBackend(
                Environment env,
                String operatorIdentifier,
                @Nonnull Collection<OperatorStateHandle> stateHandles,
                CloseableRegistry cancelStreamRegistry) throws Exception {
            return mock(OperatorStateBackend.class);
        }
    });

    OperatorID operatorID = new OperatorID(47L, 11L);
    TaskStateSnapshot taskStateSnapshot = new TaskStateSnapshot();
    Random random = new Random(0x42);

    OperatorSubtaskState operatorSubtaskState = OperatorSubtaskState.builder()
            .setManagedOperatorState(new OperatorStreamStateHandle(
                    Collections.singletonMap(
                            "a",
                            new OperatorStateHandle.StateMetaInfo(new long[] { 0, 10 }, SPLIT_DISTRIBUTE)),
                    CheckpointTestUtils.createDummyStreamStateHandle(random, null)))
            .setRawOperatorState(new OperatorStreamStateHandle(
                    Collections.singletonMap(
                            "_default_",
                            new OperatorStateHandle.StateMetaInfo(new long[] { 0, 20, 30 }, SPLIT_DISTRIBUTE)),
                    CheckpointTestUtils.createDummyStreamStateHandle(random, null)))
            .setManagedKeyedState(CheckpointTestUtils.createDummyKeyGroupStateHandle(random, null))
            .setRawKeyedState(CheckpointTestUtils.createDummyKeyGroupStateHandle(random, null))
            .setInputChannelState(singleton(createNewInputChannelStateHandle(10, random)))
            .setResultSubpartitionState(singleton(createNewResultSubpartitionStateHandle(10, random)))
            .build();

    taskStateSnapshot.putSubtaskStateByOperatorID(operatorID, operatorSubtaskState);
    JobManagerTaskRestore jobManagerTaskRestore = new JobManagerTaskRestore(42L, taskStateSnapshot);

    StreamTaskStateInitializer streamTaskStateManager =
            streamTaskStateManager(mockingBackend, jobManagerTaskRestore, false);

    AbstractStreamOperator<?> streamOperator = mock(AbstractStreamOperator.class);
    when(streamOperator.getOperatorID()).thenReturn(operatorID);

    TypeSerializer<?> typeSerializer = new IntSerializer();
    CloseableRegistry closeableRegistry = new CloseableRegistry();

    StreamOperatorStateContext stateContext = streamTaskStateManager.streamOperatorStateContext(
            streamOperator.getOperatorID(),
            streamOperator.getClass().getSimpleName(),
            new TestProcessingTimeService(),
            streamOperator,
            typeSerializer,
            closeableRegistry,
            new UnregisteredMetricsGroup(),
            1.0,
            false);

    OperatorStateBackend operatorStateBackend = stateContext.operatorStateBackend();
    CheckpointableKeyedStateBackend<?> keyedStateBackend = stateContext.keyedStateBackend();
    InternalTimeServiceManager<?> timeServiceManager = stateContext.internalTimerServiceManager();
    CloseableIterable<KeyGroupStatePartitionStreamProvider> keyedStateInputs = stateContext.rawKeyedStateInputs();
    CloseableIterable<StatePartitionStreamProvider> operatorStateInputs = stateContext.rawOperatorStateInputs();

    Assert.assertTrue("Expected the context to be restored", stateContext.isRestored());
    Assert.assertEquals(OptionalLong.of(42L), stateContext.getRestoredCheckpointId());

    Assert.assertNotNull(operatorStateBackend);
    Assert.assertNotNull(keyedStateBackend);
    // this is deactivated on purpose so that it does not attempt to consume the raw keyed state
    Assert.assertNull(timeServiceManager);
    Assert.assertNotNull(keyedStateInputs);
    Assert.assertNotNull(operatorStateInputs);

    int count = 0;
    for (KeyGroupStatePartitionStreamProvider keyedStateInput : keyedStateInputs) {
        ++count;
    }
    Assert.assertEquals(1, count);

    count = 0;
    for (StatePartitionStreamProvider operatorStateInput : operatorStateInputs) {
        ++count;
    }
    Assert.assertEquals(3, count);

    checkCloseablesRegistered(
            closeableRegistry,
            operatorStateBackend,
            keyedStateBackend,
            keyedStateInputs,
            operatorStateInputs);
}
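
The final call delegates to a checkCloseablesRegistered helper that this page does not include. A plausible sketch, assuming the helper only verifies that each resource was registered with the CloseableRegistry (the real helper in StreamTaskStateInitializerImplTest may differ in detail):

private static void checkCloseablesRegistered(CloseableRegistry registry, Closeable... closeables) {
    for (Closeable closeable : closeables) {
        // unregisterCloseable returns true only for closeables that were previously
        // registered, so a successful unregister proves the resource was tracked.
        Assert.assertTrue(registry.unregisterCloseable(closeable));
    }
}
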