Use of org.apache.flink.streaming.api.graph.StreamConfig in project flink by apache.
The class AbstractUdfStreamOperatorLifecycleTest, method testLifeCycleCancel.
@Test
public void testLifeCycleCancel() throws Exception {
    ACTUAL_ORDER_TRACKING.clear();
    Configuration taskManagerConfig = new Configuration();
    StreamConfig cfg = new StreamConfig(new Configuration());
    MockSourceFunction srcFun = new MockSourceFunction();
    cfg.setStreamOperator(new LifecycleTrackingStreamSource<>(srcFun, false));
    cfg.setOperatorID(new OperatorID());
    cfg.setTimeCharacteristic(TimeCharacteristic.ProcessingTime);
    try (ShuffleEnvironment shuffleEnvironment = new NettyShuffleEnvironmentBuilder().build()) {
        Task task = StreamTaskTest.createTask(SourceStreamTask.class, shuffleEnvironment, cfg, taskManagerConfig);
        task.startTaskThread();
        LifecycleTrackingStreamSource.runStarted.await();
        // this should cancel the task even though it is blocked on runFinished
        task.cancelExecution();
        // wait for clean termination
        task.getExecutingThread().join();
        assertEquals(ExecutionState.CANCELED, task.getExecutionState());
        assertEquals(EXPECTED_CALL_ORDER_CANCEL_RUNNING, ACTUAL_ORDER_TRACKING);
    }
}
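This test relies on ACTUAL_ORDER_TRACKING and EXPECTED_CALL_ORDER_CANCEL_RUNNING from the enclosing AbstractUdfStreamOperatorLifecycleTest, which the excerpt does not show. As a minimal sketch of the underlying idea (names and hooks are illustrative, not the actual Flink test code), a tracking source appends each lifecycle callback to a shared list so the test can compare the observed order against an expected one:

// illustrative only -- the real MockSourceFunction and LifecycleTrackingStreamSource
// record many more hooks (setup, snapshotState, notifyCheckpointComplete, ...)
static final List<String> ACTUAL_ORDER_TRACKING = Collections.synchronizedList(new ArrayList<>());

static class TrackingSource extends RichSourceFunction<Long> {

    private volatile boolean running = true;

    @Override
    public void open(Configuration parameters) {
        ACTUAL_ORDER_TRACKING.add("UDF::open");
    }

    @Override
    public void run(SourceContext<Long> ctx) throws Exception {
        ACTUAL_ORDER_TRACKING.add("UDF::run");
        while (running) {
            // stay blocked, like the source in the test, until cancel() is called
            Thread.sleep(10);
        }
    }

    @Override
    public void cancel() {
        ACTUAL_ORDER_TRACKING.add("UDF::cancel");
        running = false;
    }

    @Override
    public void close() {
        ACTUAL_ORDER_TRACKING.add("UDF::close");
    }
}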
Use of org.apache.flink.streaming.api.graph.StreamConfig in project flink by apache.
The class RocksDBAsyncSnapshotTest, method testFullyAsyncSnapshot.
/**
* This ensures that asynchronous state handles are actually materialized asynchronously.
*
* <p>We use latches to block at various stages and see if the code still continues through the
* parts that are not asynchronous. If the checkpoint is not done asynchronously the test will
* simply lock forever.
*/
@Test
public void testFullyAsyncSnapshot() throws Exception {
    final OneInputStreamTaskTestHarness<String, String> testHarness = new OneInputStreamTaskTestHarness<>(OneInputStreamTask::new, BasicTypeInfo.STRING_TYPE_INFO, BasicTypeInfo.STRING_TYPE_INFO);
    testHarness.setupOutputForSingletonOperatorChain();
    testHarness.configureForKeyedStream(new KeySelector<String, String>() {

        @Override
        public String getKey(String value) throws Exception {
            return value;
        }
    }, BasicTypeInfo.STRING_TYPE_INFO);
    StreamConfig streamConfig = testHarness.getStreamConfig();
    File dbDir = temporaryFolder.newFolder();
    RocksDBStateBackend backend = new RocksDBStateBackend(new MemoryStateBackend());
    backend.setDbStoragePath(dbDir.getAbsolutePath());
    streamConfig.setStateBackend(backend);
    streamConfig.setStreamOperator(new AsyncCheckpointOperator());
    streamConfig.setOperatorID(new OperatorID());
    final OneShotLatch delayCheckpointLatch = new OneShotLatch();
    final OneShotLatch ensureCheckpointLatch = new OneShotLatch();
    CheckpointResponder checkpointResponderMock = new CheckpointResponder() {

        @Override
        public void acknowledgeCheckpoint(JobID jobID, ExecutionAttemptID executionAttemptID, long checkpointId, CheckpointMetrics checkpointMetrics, TaskStateSnapshot subtaskState) {
            // block on the latch, to verify that triggerCheckpoint returns below,
            // even though the async checkpoint would not finish
            try {
                delayCheckpointLatch.await();
            } catch (InterruptedException e) {
                throw new RuntimeException(e);
            }
            boolean hasManagedKeyedState = false;
            for (Map.Entry<OperatorID, OperatorSubtaskState> entry : subtaskState.getSubtaskStateMappings()) {
                OperatorSubtaskState state = entry.getValue();
                if (state != null) {
                    hasManagedKeyedState |= state.getManagedKeyedState() != null;
                }
            }
            // should be one k/v state
            assertTrue(hasManagedKeyedState);
            // we now know that the checkpoint went through
            ensureCheckpointLatch.trigger();
        }

        @Override
        public void reportCheckpointMetrics(JobID jobID, ExecutionAttemptID executionAttemptID, long checkpointId, CheckpointMetrics checkpointMetrics) {
        }

        @Override
        public void declineCheckpoint(JobID jobID, ExecutionAttemptID executionAttemptID, long checkpointId, CheckpointException checkpointException) {
        }
    };
    JobID jobID = new JobID();
    ExecutionAttemptID executionAttemptID = new ExecutionAttemptID();
    TestTaskStateManager taskStateManagerTestMock = new TestTaskStateManager(jobID, executionAttemptID, checkpointResponderMock, TestLocalRecoveryConfig.disabled(), new InMemoryStateChangelogStorage(), new HashMap<>(), -1L, new OneShotLatch());
    StreamMockEnvironment mockEnv = new StreamMockEnvironment(testHarness.jobConfig, testHarness.taskConfig, testHarness.memorySize, new MockInputSplitProvider(), testHarness.bufferSize, taskStateManagerTestMock);
    AtomicReference<Throwable> errorRef = new AtomicReference<>();
    mockEnv.setExternalExceptionHandler(errorRef::set);
    testHarness.invoke(mockEnv);
    testHarness.waitForTaskRunning();
    final OneInputStreamTask<String, String> task = testHarness.getTask();
    task.triggerCheckpointAsync(new CheckpointMetaData(42, 17), CheckpointOptions.forCheckpointWithDefaultLocation()).get();
    testHarness.processElement(new StreamRecord<>("Wohoo", 0));
    // now we allow the checkpoint
    delayCheckpointLatch.trigger();
    // wait for the checkpoint to go through
    ensureCheckpointLatch.await();
    testHarness.endInput();
    ExecutorService threadPool = task.getAsyncOperationsThreadPool();
    threadPool.shutdown();
    Assert.assertTrue(threadPool.awaitTermination(60_000, TimeUnit.MILLISECONDS));
    testHarness.waitForTaskCompletion();
    if (errorRef.get() != null) {
        fail("Unexpected exception during execution.");
    }
}
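The two OneShotLatch instances are what make the asynchrony observable: the mocked responder blocks on delayCheckpointLatch, so the triggerCheckpointAsync(...).get() and processElement(...) calls above can only return if the snapshot material runs on a separate thread. A minimal sketch of that gating pattern with Flink's org.apache.flink.core.testutils.OneShotLatch (the thread body stands in for the async work and is illustrative):

final OneShotLatch gate = new OneShotLatch();
final OneShotLatch done = new OneShotLatch();

Thread asyncPart = new Thread(() -> {
    try {
        // stands in for the async snapshot: parked until the test allows it
        gate.await();
        done.trigger();
    } catch (InterruptedException e) {
        Thread.currentThread().interrupt();
    }
});
asyncPart.start();

// the synchronous part of the test keeps making progress here; if the
// gated work ran on the caller's thread instead, this would deadlock
gate.trigger();
done.await(); // returns once the async part has finished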
Use of org.apache.flink.streaming.api.graph.StreamConfig in project flink by apache.
The class StreamOperatorChainingTest, method testMultiChainingWithSplit.
/**
 * Verify that multi-chaining works together with side outputs (split). The environment is
 * passed in so the same check can run both with and without object reuse enabled.
 */
private void testMultiChainingWithSplit(StreamExecutionEnvironment env) throws Exception {
    // set parallelism to 2 to avoid chaining with the source when only 1 processor is available
    env.setParallelism(2);
    // the actual elements will not be used
    DataStream<Integer> input = env.fromElements(1, 2, 3);
    sink1Results = new ArrayList<>();
    sink2Results = new ArrayList<>();
    sink3Results = new ArrayList<>();
    input = input.map(value -> value);
    OutputTag<Integer> oneOutput = new OutputTag<Integer>("one") {
    };
    OutputTag<Integer> otherOutput = new OutputTag<Integer>("other") {
    };
    SingleOutputStreamOperator<Object> split = input.process(new ProcessFunction<Integer, Object>() {

        private static final long serialVersionUID = 1L;

        @Override
        public void processElement(Integer value, Context ctx, Collector<Object> out) throws Exception {
            if (value.equals(1)) {
                ctx.output(oneOutput, value);
            } else {
                ctx.output(otherOutput, value);
            }
        }
    });
    split.getSideOutput(oneOutput).map(value -> "First 1: " + value).addSink(new SinkFunction<String>() {

        @Override
        public void invoke(String value, Context ctx) throws Exception {
            sink1Results.add(value);
        }
    });
    split.getSideOutput(oneOutput).map(value -> "First 2: " + value).addSink(new SinkFunction<String>() {

        @Override
        public void invoke(String value, Context ctx) throws Exception {
            sink2Results.add(value);
        }
    });
    split.getSideOutput(otherOutput).map(value -> "Second: " + value).addSink(new SinkFunction<String>() {

        @Override
        public void invoke(String value, Context ctx) throws Exception {
            sink3Results.add(value);
        }
    });
    // we build our own StreamTask and OperatorChain
    JobGraph jobGraph = env.getStreamGraph().getJobGraph();
    Assert.assertEquals(2, jobGraph.getVerticesSortedTopologicallyFromSources().size());
    JobVertex chainedVertex = jobGraph.getVerticesSortedTopologicallyFromSources().get(1);
    Configuration configuration = chainedVertex.getConfiguration();
    StreamConfig streamConfig = new StreamConfig(configuration);
    StreamMap<Integer, Integer> headOperator = streamConfig.getStreamOperator(Thread.currentThread().getContextClassLoader());
    try (MockEnvironment environment = createMockEnvironment(chainedVertex.getName())) {
        StreamTask<Integer, StreamMap<Integer, Integer>> mockTask = createMockTask(streamConfig, environment);
        OperatorChain<Integer, StreamMap<Integer, Integer>> operatorChain = createOperatorChain(streamConfig, environment, mockTask);
        headOperator.setup(mockTask, streamConfig, operatorChain.getMainOperatorOutput());
        operatorChain.initializeStateAndOpenOperators(null);
        headOperator.processElement(new StreamRecord<>(1));
        headOperator.processElement(new StreamRecord<>(2));
        headOperator.processElement(new StreamRecord<>(3));
        assertThat(sink1Results, contains("First 1: 1"));
        assertThat(sink2Results, contains("First 2: 1"));
        assertThat(sink3Results, contains("Second: 2", "Second: 3"));
    }
}
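One detail worth calling out: the OutputTag instances above are created as anonymous subclasses (new OutputTag<Integer>("one") {}). The trailing braces are deliberate; they let Flink's TypeExtractor recover the side output's element type from the subclass's generic signature, whereas a plain new OutputTag<Integer>("one") is erased at runtime and fails type extraction. Passing the TypeInformation explicitly avoids the subclassing, as in this short sketch:

// anonymous subclass: Integer is reified in the generic supertype,
// so the element type can be extracted at runtime
OutputTag<Integer> oneOutput = new OutputTag<Integer>("one") {};

// equivalent alternative: hand over the TypeInformation directly
OutputTag<Integer> otherOutput = new OutputTag<>("other", Types.INT);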
Use of org.apache.flink.streaming.api.graph.StreamConfig in project flink by apache.
The class InputProcessorUtilTest, method testCreateCheckpointedMultipleInputGate.
@Test
public void testCreateCheckpointedMultipleInputGate() throws Exception {
    try (CloseableRegistry registry = new CloseableRegistry()) {
        MockEnvironment environment = new MockEnvironmentBuilder().build();
        MockStreamTask streamTask = new MockStreamTaskBuilder(environment).build();
        StreamConfig streamConfig = new StreamConfig(environment.getJobConfiguration());
        streamConfig.setCheckpointMode(CheckpointingMode.EXACTLY_ONCE);
        streamConfig.setUnalignedCheckpointsEnabled(true);
        // First input gate has index larger than the second
        List<IndexedInputGate>[] inputGates = new List[] { Collections.singletonList(getGate(1, 4)), Collections.singletonList(getGate(0, 2)) };
        CheckpointBarrierHandler barrierHandler = InputProcessorUtil.createCheckpointBarrierHandler(streamTask, streamConfig, new TestSubtaskCheckpointCoordinator(new MockChannelStateWriter()), streamTask.getName(), inputGates, Collections.emptyList(), new SyncMailboxExecutor(), new TestProcessingTimeService());
        CheckpointedInputGate[] checkpointedMultipleInputGate = InputProcessorUtil.createCheckpointedMultipleInputGate(new SyncMailboxExecutor(), inputGates, environment.getMetricGroup().getIOMetricGroup(), barrierHandler, streamConfig);
        for (CheckpointedInputGate checkpointedInputGate : checkpointedMultipleInputGate) {
            registry.registerCloseable(checkpointedInputGate);
        }
        List<IndexedInputGate> allInputGates = Arrays.stream(inputGates).flatMap(gates -> gates.stream()).collect(Collectors.toList());
        for (IndexedInputGate inputGate : allInputGates) {
            for (int channelId = 0; channelId < inputGate.getNumberOfInputChannels(); channelId++) {
                barrierHandler.processBarrier(new CheckpointBarrier(1, 42, CheckpointOptions.unaligned(CheckpointType.CHECKPOINT, CheckpointStorageLocationReference.getDefault())), new InputChannelInfo(inputGate.getGateIndex(), channelId), false);
            }
        }
        assertTrue(barrierHandler.getAllBarriersReceivedFuture(1).isDone());
    }
}
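The getGate(index, numChannels) helper comes from the surrounding InputProcessorUtilTest and is not shown in this excerpt. A plausible sketch of what it has to return, an IndexedInputGate with the given gate index and channel count, using Flink's test mock (the exact MockIndexedInputGate constructor is an assumption here and may differ between Flink versions):

// assumption: MockIndexedInputGate(gateIndex, numberOfInputChannels);
// check the flink-runtime test utilities for the signature in your version
private IndexedInputGate getGate(int index, int numChannels) {
    return new MockIndexedInputGate(index, numChannels);
}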
Use of org.apache.flink.streaming.api.graph.StreamConfig in project flink by apache.
The class TestProcessingTimeServiceTest, method testCustomTimeServiceProvider.
@Test
public void testCustomTimeServiceProvider() throws Throwable {
    final TestProcessingTimeService tp = new TestProcessingTimeService();
    final OneInputStreamTaskTestHarness<String, String> testHarness = new OneInputStreamTaskTestHarness<>((env) -> new OneInputStreamTask<>(env, tp), BasicTypeInfo.STRING_TYPE_INFO, BasicTypeInfo.STRING_TYPE_INFO);
    testHarness.setupOutputForSingletonOperatorChain();
    StreamConfig streamConfig = testHarness.getStreamConfig();
    StreamMap<String, String> mapOperator = new StreamMap<>(new StreamTaskTimerTest.DummyMapFunction<>());
    streamConfig.setStreamOperator(mapOperator);
    streamConfig.setOperatorID(new OperatorID());
    testHarness.invoke();
    testHarness.waitForTaskRunning();
    ProcessingTimeService processingTimeService = ((StreamMap<?, ?>) testHarness.getHeadOperator()).getProcessingTimeService();
    assertEquals(Long.MIN_VALUE, processingTimeService.getCurrentProcessingTime());
    tp.setCurrentTime(11);
    assertEquals(11, processingTimeService.getCurrentProcessingTime());
    tp.setCurrentTime(15);
    tp.setCurrentTime(16);
    assertEquals(16, processingTimeService.getCurrentProcessingTime());
    // register 2 timers
    processingTimeService.registerTimer(30, timestamp -> {
    });
    processingTimeService.registerTimer(40, timestamp -> {
    });
    assertEquals(2, tp.getNumActiveTimers());
    tp.setCurrentTime(35);
    assertEquals(1, tp.getNumActiveTimers());
    tp.setCurrentTime(40);
    assertEquals(0, tp.getNumActiveTimers());
    tp.shutdownService();
}
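TestProcessingTimeService makes timer behavior deterministic: timers fire synchronously inside setCurrentTime(...) as soon as the manual clock reaches their timestamp, which is why the active-timer counts above drop immediately after each call. A minimal standalone sketch of the same pattern (the list-backed callback is illustrative):

@Test
public void sketchManualTimerClock() throws Exception {
    TestProcessingTimeService timeService = new TestProcessingTimeService();
    List<Long> fired = new ArrayList<>();

    // ProcessingTimeCallback.onProcessingTime(long) fits a method reference
    timeService.registerTimer(30, fired::add);
    timeService.registerTimer(40, fired::add);

    timeService.setCurrentTime(35); // fires the timer registered for 30
    assertEquals(1, fired.size());

    timeService.setCurrentTime(40); // fires the remaining timer
    assertEquals(2, fired.size());
}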