Use of org.apache.flink.streaming.runtime.tasks.TestProcessingTimeService in project flink by apache.
The class StreamSourceOperatorLatencyMetricsTest, method testLatencyMarkEmission:
private void testLatencyMarkEmission(int numberLatencyMarkers, OperatorSetupOperation operatorSetup) throws Exception {
    final List<StreamElement> output = new ArrayList<>();

    final TestProcessingTimeService testProcessingTimeService = new TestProcessingTimeService();
    testProcessingTimeService.setCurrentTime(0L);
    final List<Long> processingTimes = Arrays.asList(1L, 10L, 11L, 21L, maxProcessingTime);

    // regular stream source operator
    final StreamSource<Long, ProcessingTimeServiceSource> operator =
            new StreamSource<>(new ProcessingTimeServiceSource(testProcessingTimeService, processingTimes));
    operatorSetup.setupSourceOperator(operator, testProcessingTimeService);

    // run and wait to be stopped
    OperatorChain<?, ?> operatorChain =
            new RegularOperatorChain<>(
                    operator.getContainingTask(),
                    StreamTask.createRecordWriterDelegate(
                            operator.getOperatorConfig(), new MockEnvironmentBuilder().build()));
    try {
        operator.run(new Object(), new CollectorOutput<>(output), operatorChain);
        operator.finish();
    } finally {
        operatorChain.close();
    }

    assertEquals(numberLatencyMarkers, output.size());

    long timestamp = 0L;
    int expectedLatencyIndex = 0;
    int i = 0;
    // verify that the output contains only latency markers
    for (; i < numberLatencyMarkers; i++) {
        StreamElement se = output.get(i);
        Assert.assertTrue(se.isLatencyMarker());
        Assert.assertEquals(operator.getOperatorID(), se.asLatencyMarker().getOperatorId());
        Assert.assertEquals(0, se.asLatencyMarker().getSubtaskIndex());
        // advance to the first processing time at or after the expected emission
        // timestamp; 10 and 11 fall within the same interval, so a marker is never
        // emitted for both
        while (timestamp > processingTimes.get(expectedLatencyIndex)) {
            expectedLatencyIndex++;
        }
        Assert.assertEquals(
                processingTimes.get(expectedLatencyIndex).longValue(),
                se.asLatencyMarker().getMarkedTime());
        timestamp += latencyMarkInterval;
    }
}
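A minimal sketch (not from the Flink sources) of the mechanism the test above builds on: TestProcessingTimeService replaces the wall clock with a manually driven one, and setCurrentTime synchronously fires every timer scheduled at or before the new time on the calling thread, which is what makes the latency-marker emission deterministic. The callback body is purely illustrative.

@Test
public void sketchManualClock() throws Exception {
    TestProcessingTimeService timeService = new TestProcessingTimeService();
    timeService.setCurrentTime(0L);
    // schedule a one-shot timer for t=5
    timeService.registerTimer(5L, timestamp -> System.out.println("timer fired at " + timestamp));
    // advancing the clock past t=5 fires the timer before setCurrentTime returns
    timeService.setCurrentTime(10L);
    assertEquals(0, timeService.getNumActiveTimers());
}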
Use of org.apache.flink.streaming.runtime.tasks.TestProcessingTimeService in project flink by apache.
The class InputProcessorUtilTest, method testCreateCheckpointedMultipleInputGate:
@Test
public void testCreateCheckpointedMultipleInputGate() throws Exception {
    try (CloseableRegistry registry = new CloseableRegistry()) {
        MockEnvironment environment = new MockEnvironmentBuilder().build();
        MockStreamTask streamTask = new MockStreamTaskBuilder(environment).build();
        StreamConfig streamConfig = new StreamConfig(environment.getJobConfiguration());
        streamConfig.setCheckpointMode(CheckpointingMode.EXACTLY_ONCE);
        streamConfig.setUnalignedCheckpointsEnabled(true);

        // First input gate has index larger than the second
        List<IndexedInputGate>[] inputGates =
                new List[] {
                    Collections.singletonList(getGate(1, 4)),
                    Collections.singletonList(getGate(0, 2))
                };

        CheckpointBarrierHandler barrierHandler =
                InputProcessorUtil.createCheckpointBarrierHandler(
                        streamTask,
                        streamConfig,
                        new TestSubtaskCheckpointCoordinator(new MockChannelStateWriter()),
                        streamTask.getName(),
                        inputGates,
                        Collections.emptyList(),
                        new SyncMailboxExecutor(),
                        new TestProcessingTimeService());
        CheckpointedInputGate[] checkpointedMultipleInputGate =
                InputProcessorUtil.createCheckpointedMultipleInputGate(
                        new SyncMailboxExecutor(),
                        inputGates,
                        environment.getMetricGroup().getIOMetricGroup(),
                        barrierHandler,
                        streamConfig);
        for (CheckpointedInputGate checkpointedInputGate : checkpointedMultipleInputGate) {
            registry.registerCloseable(checkpointedInputGate);
        }

        List<IndexedInputGate> allInputGates =
                Arrays.stream(inputGates)
                        .flatMap(gates -> gates.stream())
                        .collect(Collectors.toList());
        for (IndexedInputGate inputGate : allInputGates) {
            for (int channelId = 0; channelId < inputGate.getNumberOfInputChannels(); channelId++) {
                barrierHandler.processBarrier(
                        new CheckpointBarrier(
                                1,
                                42,
                                CheckpointOptions.unaligned(
                                        CheckpointType.CHECKPOINT,
                                        CheckpointStorageLocationReference.getDefault())),
                        new InputChannelInfo(inputGate.getGateIndex(), channelId),
                        false);
            }
        }
        assertTrue(barrierHandler.getAllBarriersReceivedFuture(1).isDone());
    }
}
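A small follow-on sketch, reusing the barrierHandler variable from the test above: getAllBarriersReceivedFuture returns a plain java.util.concurrent.CompletableFuture, so instead of polling isDone() a test could attach a completion callback.

CompletableFuture<Void> aligned = barrierHandler.getAllBarriersReceivedFuture(1L);
// runs once a barrier for checkpoint 1 has been processed on every input channel
aligned.thenRun(() -> System.out.println("checkpoint 1 aligned on all channels"));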
Use of org.apache.flink.streaming.runtime.tasks.TestProcessingTimeService in project flink by apache.
The class TestProcessingTimeServiceTest, method testCustomTimeServiceProvider:
@Test
public void testCustomTimeServiceProvider() throws Throwable {
    final TestProcessingTimeService tp = new TestProcessingTimeService();

    final OneInputStreamTaskTestHarness<String, String> testHarness =
            new OneInputStreamTaskTestHarness<>(
                    (env) -> new OneInputStreamTask<>(env, tp),
                    BasicTypeInfo.STRING_TYPE_INFO,
                    BasicTypeInfo.STRING_TYPE_INFO);
    testHarness.setupOutputForSingletonOperatorChain();

    StreamConfig streamConfig = testHarness.getStreamConfig();
    StreamMap<String, String> mapOperator = new StreamMap<>(new StreamTaskTimerTest.DummyMapFunction<>());
    streamConfig.setStreamOperator(mapOperator);
    streamConfig.setOperatorID(new OperatorID());

    testHarness.invoke();
    testHarness.waitForTaskRunning();

    ProcessingTimeService processingTimeService =
            ((StreamMap<?, ?>) testHarness.getHeadOperator()).getProcessingTimeService();

    assertEquals(Long.MIN_VALUE, processingTimeService.getCurrentProcessingTime());

    tp.setCurrentTime(11);
    assertEquals(11, processingTimeService.getCurrentProcessingTime());

    tp.setCurrentTime(15);
    tp.setCurrentTime(16);
    assertEquals(16, processingTimeService.getCurrentProcessingTime());

    // register 2 timers
    processingTimeService.registerTimer(30, timestamp -> {
    });
    processingTimeService.registerTimer(40, timestamp -> {
    });
    assertEquals(2, tp.getNumActiveTimers());

    tp.setCurrentTime(35);
    assertEquals(1, tp.getNumActiveTimers());

    tp.setCurrentTime(40);
    assertEquals(0, tp.getNumActiveTimers());

    tp.shutdownService();
}
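A hedged sketch of the timer bookkeeping this test exercises, with an illustrative AtomicInteger counter in place of the empty callbacks: advancing the clock past a timer's timestamp fires it synchronously and removes it from the active count.

TestProcessingTimeService tp = new TestProcessingTimeService();
tp.setCurrentTime(0);
AtomicInteger fired = new AtomicInteger(); // java.util.concurrent.atomic
tp.registerTimer(30, timestamp -> fired.incrementAndGet());
tp.registerTimer(40, timestamp -> fired.incrementAndGet());
tp.setCurrentTime(35);                     // fires only the t=30 timer
assertEquals(1, fired.get());
assertEquals(1, tp.getNumActiveTimers());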
Use of org.apache.flink.streaming.runtime.tasks.TestProcessingTimeService in project flink by apache.
The class Kafka09FetcherTest, method ensureOffsetsGetCommitted:
@Test
public void ensureOffsetsGetCommitted() throws Exception {
    // test data
    final KafkaTopicPartition testPartition1 = new KafkaTopicPartition("test", 42);
    final KafkaTopicPartition testPartition2 = new KafkaTopicPartition("another", 99);

    final Map<KafkaTopicPartition, Long> testCommitData1 = new HashMap<>();
    testCommitData1.put(testPartition1, 11L);
    testCommitData1.put(testPartition2, 18L);

    final Map<KafkaTopicPartition, Long> testCommitData2 = new HashMap<>();
    testCommitData2.put(testPartition1, 19L);
    testCommitData2.put(testPartition2, 28L);

    final BlockingQueue<Map<TopicPartition, OffsetAndMetadata>> commitStore = new LinkedBlockingQueue<>();

    // ----- the mock consumer with poll(), wakeup(), and commit(A)sync calls -----

    final MultiShotLatch blockerLatch = new MultiShotLatch();

    KafkaConsumer<?, ?> mockConsumer = mock(KafkaConsumer.class);
    when(mockConsumer.poll(anyLong())).thenAnswer(new Answer<ConsumerRecords<?, ?>>() {
        @Override
        public ConsumerRecords<?, ?> answer(InvocationOnMock invocation) throws InterruptedException {
            blockerLatch.await();
            return ConsumerRecords.empty();
        }
    });
    doAnswer(new Answer<Void>() {
        @Override
        public Void answer(InvocationOnMock invocation) {
            blockerLatch.trigger();
            return null;
        }
    }).when(mockConsumer).wakeup();

    doAnswer(new Answer<Void>() {
        @Override
        public Void answer(InvocationOnMock invocation) {
            @SuppressWarnings("unchecked")
            Map<TopicPartition, OffsetAndMetadata> offsets =
                    (Map<TopicPartition, OffsetAndMetadata>) invocation.getArguments()[0];
            OffsetCommitCallback callback = (OffsetCommitCallback) invocation.getArguments()[1];
            commitStore.add(offsets);
            callback.onComplete(offsets, null);
            return null;
        }
    }).when(mockConsumer).commitAsync(
            Mockito.<Map<TopicPartition, OffsetAndMetadata>>any(), any(OffsetCommitCallback.class));

    // make sure the fetcher creates the mock consumer
    whenNew(KafkaConsumer.class).withAnyArguments().thenReturn(mockConsumer);
    // ----- create the test fetcher -----

    @SuppressWarnings("unchecked")
    SourceContext<String> sourceContext = mock(SourceContext.class);
    Map<KafkaTopicPartition, Long> partitionsWithInitialOffsets =
            Collections.singletonMap(
                    new KafkaTopicPartition("test", 42), KafkaTopicPartitionStateSentinel.GROUP_OFFSET);
    KeyedDeserializationSchema<String> schema = new KeyedDeserializationSchemaWrapper<>(new SimpleStringSchema());

    final Kafka09Fetcher<String> fetcher = new Kafka09Fetcher<>(
            sourceContext,
            partitionsWithInitialOffsets,
            null, /* periodic watermark extractor */
            null, /* punctuated watermark extractor */
            new TestProcessingTimeService(),
            10, /* watermark interval */
            this.getClass().getClassLoader(),
            "task_name",
            new UnregisteredMetricsGroup(),
            schema,
            new Properties(),
            0L,
            false);
    // ----- run the fetcher -----

    final AtomicReference<Throwable> error = new AtomicReference<>();
    final Thread fetcherRunner = new Thread("fetcher runner") {
        @Override
        public void run() {
            try {
                fetcher.runFetchLoop();
            } catch (Throwable t) {
                error.set(t);
            }
        }
    };
    fetcherRunner.start();
    // ----- trigger the first offset commit -----

    fetcher.commitInternalOffsetsToKafka(testCommitData1);
    Map<TopicPartition, OffsetAndMetadata> result1 = commitStore.take();

    for (Entry<TopicPartition, OffsetAndMetadata> entry : result1.entrySet()) {
        TopicPartition partition = entry.getKey();
        if (partition.topic().equals("test")) {
            assertEquals(42, partition.partition());
            assertEquals(12L, entry.getValue().offset());
        } else if (partition.topic().equals("another")) {
            assertEquals(99, partition.partition());
            assertEquals(19L, entry.getValue().offset());
        }
    }
    // ----- trigger the second offset commit -----

    fetcher.commitInternalOffsetsToKafka(testCommitData2);
    Map<TopicPartition, OffsetAndMetadata> result2 = commitStore.take();

    for (Entry<TopicPartition, OffsetAndMetadata> entry : result2.entrySet()) {
        TopicPartition partition = entry.getKey();
        if (partition.topic().equals("test")) {
            assertEquals(42, partition.partition());
            assertEquals(20L, entry.getValue().offset());
        } else if (partition.topic().equals("another")) {
            assertEquals(99, partition.partition());
            assertEquals(29L, entry.getValue().offset());
        }
    }
    // ----- test done, wait till the fetcher is done for a clean shutdown -----
    fetcher.cancel();
    fetcherRunner.join();

    // check that there were no errors in the fetcher
    final Throwable caughtError = error.get();
    if (caughtError != null && !(caughtError instanceof Handover.ClosedException)) {
        throw new Exception("Exception in the fetcher", caughtError);
    }
}
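The offset arithmetic behind the assertions above follows Kafka's committed-offset convention: what gets committed is the offset of the next record to read, i.e. the last processed offset plus one (11 becomes 12, 19 becomes 20). A minimal sketch, using an illustrative local value:

long lastProcessedOffset = 11L; // illustrative value, mirroring testCommitData1
// the committed offset points at the next record to consume
OffsetAndMetadata toCommit = new OffsetAndMetadata(lastProcessedOffset + 1); // commits 12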
Use of org.apache.flink.streaming.runtime.tasks.TestProcessingTimeService in project flink by apache.
The class AbstractFetcherTest, method testPunctuatedWatermarks:
// ------------------------------------------------------------------------
//  Timestamps & watermarks tests
// ------------------------------------------------------------------------

@Test
public void testPunctuatedWatermarks() throws Exception {
    final String testTopic = "test topic name";

    Map<KafkaTopicPartition, Long> originalPartitions = new HashMap<>();
    originalPartitions.put(new KafkaTopicPartition(testTopic, 7), KafkaTopicPartitionStateSentinel.LATEST_OFFSET);
    originalPartitions.put(new KafkaTopicPartition(testTopic, 13), KafkaTopicPartitionStateSentinel.LATEST_OFFSET);
    originalPartitions.put(new KafkaTopicPartition(testTopic, 21), KafkaTopicPartitionStateSentinel.LATEST_OFFSET);

    TestSourceContext<Long> sourceContext = new TestSourceContext<>();
    TestProcessingTimeService processingTimeProvider = new TestProcessingTimeService();

    TestFetcher<Long> fetcher = new TestFetcher<>(
            sourceContext,
            originalPartitions,
            null, /* periodic watermark assigner */
            new SerializedValue<AssignerWithPunctuatedWatermarks<Long>>(new PunctuatedTestExtractor()),
            processingTimeProvider,
            0);

    final KafkaTopicPartitionState<Object> part1 = fetcher.subscribedPartitionStates()[0];
    final KafkaTopicPartitionState<Object> part2 = fetcher.subscribedPartitionStates()[1];
    final KafkaTopicPartitionState<Object> part3 = fetcher.subscribedPartitionStates()[2];

    // elements generate a watermark if the timestamp is a multiple of three

    // elements for partition 1
    fetcher.emitRecord(1L, part1, 1L);
    fetcher.emitRecord(2L, part1, 2L);
    fetcher.emitRecord(3L, part1, 3L);
    assertEquals(3L, sourceContext.getLatestElement().getValue().longValue());
    assertEquals(3L, sourceContext.getLatestElement().getTimestamp());
    assertFalse(sourceContext.hasWatermark());

    // elements for partition 2
    fetcher.emitRecord(12L, part2, 1L);
    assertEquals(12L, sourceContext.getLatestElement().getValue().longValue());
    assertEquals(12L, sourceContext.getLatestElement().getTimestamp());
    assertFalse(sourceContext.hasWatermark());

    // elements for partition 3
    fetcher.emitRecord(101L, part3, 1L);
    fetcher.emitRecord(102L, part3, 2L);
    assertEquals(102L, sourceContext.getLatestElement().getValue().longValue());
    assertEquals(102L, sourceContext.getLatestElement().getTimestamp());

    // now, we should have a watermark
    assertTrue(sourceContext.hasWatermark());
    assertEquals(3L, sourceContext.getLatestWatermark().getTimestamp());

    // advance partition 3
    fetcher.emitRecord(1003L, part3, 3L);
    fetcher.emitRecord(1004L, part3, 4L);
    fetcher.emitRecord(1005L, part3, 5L);
    assertEquals(1005L, sourceContext.getLatestElement().getValue().longValue());
    assertEquals(1005L, sourceContext.getLatestElement().getTimestamp());

    // advance partition 1 beyond partition 2 - this bumps the watermark
    fetcher.emitRecord(30L, part1, 4L);
    assertEquals(30L, sourceContext.getLatestElement().getValue().longValue());
    assertEquals(30L, sourceContext.getLatestElement().getTimestamp());
    assertTrue(sourceContext.hasWatermark());
    assertEquals(12L, sourceContext.getLatestWatermark().getTimestamp());

    // advance partition 2 again - this bumps the watermark
    fetcher.emitRecord(13L, part2, 2L);
    assertFalse(sourceContext.hasWatermark());
    fetcher.emitRecord(14L, part2, 3L);
    assertFalse(sourceContext.hasWatermark());
    fetcher.emitRecord(15L, part2, 3L);
    assertTrue(sourceContext.hasWatermark());
    assertEquals(15L, sourceContext.getLatestWatermark().getTimestamp());
}
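PunctuatedTestExtractor is internal to the Flink tests and not shown on this page; based on the "multiple of three" comment in the test, it plausibly looks like the following reconstruction (an assumption, not the verbatim class):

private static class PunctuatedTestExtractor implements AssignerWithPunctuatedWatermarks<Long> {

    @Override
    public long extractTimestamp(Long element, long previousElementTimestamp) {
        // the element value doubles as its event timestamp
        return element;
    }

    @Override
    public Watermark checkAndGetNextWatermark(Long lastElement, long extractedTimestamp) {
        // emit a punctuated watermark only when the timestamp is a multiple of three
        return extractedTimestamp % 3 == 0 ? new Watermark(extractedTimestamp) : null;
    }
}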