Use of org.apache.flink.streaming.runtime.tasks.TestProcessingTimeService in project flink by apache.
The class BatchExecutionInternalTimeServiceTest, method testProcessingTimeTimers.
@Test
public void testProcessingTimeTimers() {
BatchExecutionKeyedStateBackend<Integer> keyedStatedBackend = new BatchExecutionKeyedStateBackend<>(KEY_SERIALIZER, new KeyGroupRange(0, 1));
TestProcessingTimeService processingTimeService = new TestProcessingTimeService();
InternalTimeServiceManager<Integer> timeServiceManager = BatchExecutionInternalTimeServiceManager.create(keyedStatedBackend, this.getClass().getClassLoader(), new DummyKeyContext(), processingTimeService, Collections.emptyList());
List<Long> timers = new ArrayList<>();
InternalTimerService<VoidNamespace> timerService = timeServiceManager.getInternalTimerService("test", KEY_SERIALIZER, new VoidNamespaceSerializer(), LambdaTrigger.processingTimeTrigger(timer -> timers.add(timer.getTimestamp())));
keyedStatedBackend.setCurrentKey(1);
timerService.registerProcessingTimeTimer(VoidNamespace.INSTANCE, 150);
// we should never register physical timers
assertThat(processingTimeService.getNumActiveTimers(), equalTo(0));
// changing the current key fires all timers
keyedStatedBackend.setCurrentKey(2);
assertThat(timers, equalTo(Collections.singletonList(150L)));
}
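For contrast with the batch-execution time service above, which never registers physical timers, the following is a minimal, hedged sketch of how TestProcessingTimeService itself fires a physically registered timer once its mock clock is advanced. The test name, timestamps, and the exact callback semantics are illustrative assumptions, not part of the Flink test above; the usual JUnit/Hamcrest imports are assumed.
@Test
public void testMockProcessingTimeServiceSketch() throws Exception {
    // illustrative sketch only: register one timer and advance the mock clock past it
    TestProcessingTimeService mockTimeService = new TestProcessingTimeService();
    mockTimeService.setCurrentTime(0);
    List<Long> firedTimestamps = new ArrayList<>();
    mockTimeService.registerTimer(150, timestamp -> firedTimestamps.add(timestamp));
    // unlike the batch-execution time service above, a physical timer is now pending
    assertThat(mockTimeService.getNumActiveTimers(), equalTo(1));
    // advancing the mock clock to (or past) the timer's time fires the callback
    mockTimeService.setCurrentTime(200);
    assertThat(firedTimestamps.size(), equalTo(1));
    assertThat(mockTimeService.getNumActiveTimers(), equalTo(0));
}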
Use of org.apache.flink.streaming.runtime.tasks.TestProcessingTimeService in project flink by apache.
The class InternalTimerServiceImplTest, method testTimerAssignmentToKeyGroups.
@Test
public void testTimerAssignmentToKeyGroups() {
int totalNoOfTimers = 100;
int totalNoOfKeyGroups = 100;
int startKeyGroupIdx = 0;
// we have 0 to 99
int endKeyGroupIdx = totalNoOfKeyGroups - 1;
@SuppressWarnings("unchecked") Set<TimerHeapInternalTimer<Integer, String>>[] expectedNonEmptyTimerSets = new HashSet[totalNoOfKeyGroups];
TestKeyContext keyContext = new TestKeyContext();
final KeyGroupRange keyGroupRange = new KeyGroupRange(startKeyGroupIdx, endKeyGroupIdx);
final PriorityQueueSetFactory priorityQueueSetFactory = createQueueFactory(keyGroupRange, totalNoOfKeyGroups);
InternalTimerServiceImpl<Integer, String> timerService = createInternalTimerService(keyGroupRange, keyContext, new TestProcessingTimeService(), IntSerializer.INSTANCE, StringSerializer.INSTANCE, priorityQueueSetFactory);
timerService.startTimerService(IntSerializer.INSTANCE, StringSerializer.INSTANCE, mock(Triggerable.class));
for (int i = 0; i < totalNoOfTimers; i++) {
    // create the timer to be registered
    TimerHeapInternalTimer<Integer, String> timer = new TimerHeapInternalTimer<>(10 + i, i, "hello_world_" + i);
    int keyGroupIdx = KeyGroupRangeAssignment.assignToKeyGroup(timer.getKey(), totalNoOfKeyGroups);
    // add it to the expected set of timers for that key group
    Set<TimerHeapInternalTimer<Integer, String>> timerSet = expectedNonEmptyTimerSets[keyGroupIdx];
    if (timerSet == null) {
        timerSet = new HashSet<>();
        expectedNonEmptyTimerSets[keyGroupIdx] = timerSet;
    }
    timerSet.add(timer);
    // register the timer as both a processing-time and an event-time timer
    keyContext.setCurrentKey(timer.getKey());
    timerService.registerEventTimeTimer(timer.getNamespace(), timer.getTimestamp());
    timerService.registerProcessingTimeTimer(timer.getNamespace(), timer.getTimestamp());
}
List<Set<TimerHeapInternalTimer<Integer, String>>> eventTimeTimers = timerService.getEventTimeTimersPerKeyGroup();
List<Set<TimerHeapInternalTimer<Integer, String>>> processingTimeTimers = timerService.getProcessingTimeTimersPerKeyGroup();
// finally, verify that the actual per-key-group timer sets match the expected ones
for (int i = 0; i < expectedNonEmptyTimerSets.length; i++) {
    Set<TimerHeapInternalTimer<Integer, String>> expected = expectedNonEmptyTimerSets[i];
    Set<TimerHeapInternalTimer<Integer, String>> actualEvent = eventTimeTimers.get(i);
    Set<TimerHeapInternalTimer<Integer, String>> actualProcessing = processingTimeTimers.get(i);
    if (expected == null) {
        Assert.assertTrue(actualEvent.isEmpty());
        Assert.assertTrue(actualProcessing.isEmpty());
    } else {
        Assert.assertEquals(expected, actualEvent);
        Assert.assertEquals(expected, actualProcessing);
    }
}
}
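The expected sets above are indexed by the result of KeyGroupRangeAssignment.assignToKeyGroup, which hashes a key into one of maxParallelism key groups. The following small sketch (hypothetical test name, arbitrary key values) illustrates the property the test relies on: assignment is deterministic and always lands inside [0, maxParallelism).
@Test
public void keyGroupAssignmentSketch() {
    // illustrative only: every key maps deterministically into [0, maxParallelism)
    int maxParallelism = 100;
    KeyGroupRange fullRange = new KeyGroupRange(0, maxParallelism - 1);
    for (int key = 0; key < 1000; key++) {
        int keyGroup = KeyGroupRangeAssignment.assignToKeyGroup(key, maxParallelism);
        // the same key always maps to the same key group, and that group is within the full range
        Assert.assertEquals(keyGroup, KeyGroupRangeAssignment.assignToKeyGroup(key, maxParallelism));
        Assert.assertTrue(fullRange.contains(keyGroup));
    }
}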
Use of org.apache.flink.streaming.runtime.tasks.TestProcessingTimeService in project flink by apache.
The class StreamSourceContextIdleDetectionTests, method testManualWatermarkContext.
/**
 * Test scenario (idleTimeout = 100):
 * (1) Start from 0 as the initial time.
 * (2) As soon as time reaches 100, the status should have been toggled to IDLE.
 * (3) After some arbitrary time (until 300), the status should remain IDLE.
 * (4) Emit a record at 310. The status should become ACTIVE. This should schedule an idleness detection to fire at 410.
 * (5) Emit another record at 320 (which is before the next check). This should make the idleness check pass.
 * (6) Advance time to 410 and trigger the idleness detection. The status should still be ACTIVE due to step (5). Another idleness detection should fire at 510.
 * (7) Advance time to 510 and trigger the idleness detection. Since no records were collected between the two idleness detections, the status should have been toggled back to IDLE.
 *
 * <p>Inline comments refer to the corresponding steps of the scenario.
 */
@Test
public void testManualWatermarkContext() throws Exception {
long idleTimeout = 100;
long initialTime = 0;
TestProcessingTimeService processingTimeService = new TestProcessingTimeService();
processingTimeService.setCurrentTime(initialTime);
final List<StreamElement> output = new ArrayList<>();
final List<StreamElement> expectedOutput = new ArrayList<>();
SourceFunction.SourceContext<String> context = StreamSourceContexts.getSourceContext(TimeCharacteristic.EventTime, processingTimeService, new Object(), new CollectorOutput<>(output), 0, idleTimeout, true);
// -------------------------- begin test scenario --------------------------
// corresponds to step (2) of scenario (please see method-level Javadoc comment)
processingTimeService.setCurrentTime(initialTime + idleTimeout);
expectedOutput.add(WatermarkStatus.IDLE);
assertThat(output, equalTo(expectedOutput));
// corresponds to step (3) of scenario (please see method-level Javadoc comment)
processingTimeService.setCurrentTime(initialTime + 2 * idleTimeout);
processingTimeService.setCurrentTime(initialTime + 3 * idleTimeout);
assertThat(output, equalTo(expectedOutput));
// corresponds to step (4) of scenario (please see method-level Javadoc comment)
expectedOutput.add(WatermarkStatus.ACTIVE);
emitStreamElement(initialTime + 3 * idleTimeout + idleTimeout / 10, expectedOutput, processingTimeService, context);
assertThat(output, equalTo(expectedOutput));
// corresponds to step (5) of scenario (please see method-level Javadoc comment)
emitStreamElement(initialTime + 3 * idleTimeout + 2 * idleTimeout / 10, expectedOutput, processingTimeService, context);
assertThat(output, equalTo(expectedOutput));
// corresponds to step (6) of scenario (please see method-level Javadoc comment)
processingTimeService.setCurrentTime(initialTime + 4 * idleTimeout + idleTimeout / 10);
assertThat(output, equalTo(expectedOutput));
// corresponds to step (7) of scenario (please see method-level Javadoc comment)
processingTimeService.setCurrentTime(initialTime + 5 * idleTimeout + idleTimeout / 10);
expectedOutput.add(WatermarkStatus.IDLE);
assertThat(output, equalTo(expectedOutput));
}
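The emitStreamElement(...) helper is referenced above but not shown on this page. Below is a plausible, simplified reconstruction for the collect(...) variant only; it is hypothetical, and the actual Flink helper also covers the collectWithTimestamp and emitWatermark variants and may order the calls differently.
// hypothetical reconstruction, not the actual Flink test helper
private void emitStreamElement(long currentTime, List<StreamElement> expectedOutput, TestProcessingTimeService processingTimeService, SourceFunction.SourceContext<String> context) throws Exception {
    // advance the mock clock to the emission time, then emit through the source context
    processingTimeService.setCurrentTime(currentTime);
    context.collect("msg");
    // mirror the element the source context is expected to forward downstream
    expectedOutput.add(new StreamRecord<>("msg"));
}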
Use of org.apache.flink.streaming.runtime.tasks.TestProcessingTimeService in project flink by apache.
The class StreamSourceContextIdleDetectionTests, method testAutomaticWatermarkContext.
/**
 * Test scenario (idleTimeout = 100, watermarkInterval = 40):
 * (1) Start from 20 as the initial time.
 * (2) As soon as time reaches 120, the status should have been toggled to IDLE.
 * (3) After some arbitrary time (until 320), the status should remain IDLE, and no watermarks should have been emitted.
 * (4) Emit a record at 330. The status should become ACTIVE. This should schedule an idleness detection to fire at 430.
 * (5) Emit another record at 350 (which is before the next check). This should make the idleness check pass.
 * (6) Advance time to 430 and trigger the idleness detection. The status should still be ACTIVE due to step (5). This should schedule an idleness detection to fire at 530.
 * (7) Advance time to 460, at which point a watermark emission task should fire. The idleness detection is "piggy-backed" onto that task, allowing the status to be toggled to IDLE before the next actual idleness detection task at 530.
 *
 * <p>Inline comments refer to the corresponding steps of the scenario.
 */
@Test
public void testAutomaticWatermarkContext() throws Exception {
long watermarkInterval = 40;
long idleTimeout = 100;
long initialTime = 20;
TestProcessingTimeService processingTimeService = new TestProcessingTimeService();
processingTimeService.setCurrentTime(initialTime);
final List<StreamElement> output = new ArrayList<>();
final List<StreamElement> expectedOutput = new ArrayList<>();
SourceFunction.SourceContext<String> context = StreamSourceContexts.getSourceContext(TimeCharacteristic.IngestionTime, processingTimeService, new Object(), new CollectorOutput<String>(output), watermarkInterval, idleTimeout, true);
// -------------------------- begin test scenario --------------------------
// corresponds to step (2) of scenario (please see method-level Javadoc comment)
processingTimeService.setCurrentTime(initialTime + watermarkInterval);
expectedOutput.add(new Watermark(processingTimeService.getCurrentProcessingTime() - (processingTimeService.getCurrentProcessingTime() % watermarkInterval)));
processingTimeService.setCurrentTime(initialTime + 2 * watermarkInterval);
expectedOutput.add(new Watermark(processingTimeService.getCurrentProcessingTime() - (processingTimeService.getCurrentProcessingTime() % watermarkInterval)));
processingTimeService.setCurrentTime(initialTime + idleTimeout);
expectedOutput.add(WatermarkStatus.IDLE);
assertEquals(expectedOutput, output);
// corresponds to step (3) of scenario (please see method-level Javadoc comment)
processingTimeService.setCurrentTime(initialTime + 3 * watermarkInterval);
processingTimeService.setCurrentTime(initialTime + 4 * watermarkInterval);
processingTimeService.setCurrentTime(initialTime + 2 * idleTimeout);
processingTimeService.setCurrentTime(initialTime + 6 * watermarkInterval);
processingTimeService.setCurrentTime(initialTime + 7 * watermarkInterval);
processingTimeService.setCurrentTime(initialTime + 3 * idleTimeout);
assertEquals(expectedOutput, output);
// corresponds to step (4) of scenario (please see method-level Javadoc comment)
processingTimeService.setCurrentTime(initialTime + 3 * idleTimeout + idleTimeout / 10);
switch (testMethod) {
    case COLLECT:
        expectedOutput.add(WatermarkStatus.ACTIVE);
        context.collect("msg");
        expectedOutput.add(new StreamRecord<>("msg", processingTimeService.getCurrentProcessingTime()));
        expectedOutput.add(new Watermark(processingTimeService.getCurrentProcessingTime() - (processingTimeService.getCurrentProcessingTime() % watermarkInterval)));
        assertEquals(expectedOutput, output);
        break;
    case COLLECT_WITH_TIMESTAMP:
        expectedOutput.add(WatermarkStatus.ACTIVE);
        context.collectWithTimestamp("msg", processingTimeService.getCurrentProcessingTime());
        expectedOutput.add(new StreamRecord<>("msg", processingTimeService.getCurrentProcessingTime()));
        expectedOutput.add(new Watermark(processingTimeService.getCurrentProcessingTime() - (processingTimeService.getCurrentProcessingTime() % watermarkInterval)));
        assertEquals(expectedOutput, output);
        break;
    case EMIT_WATERMARK:
        // for emitWatermark, the watermark is blocked, so it does not make the status ACTIVE;
        // from here on, the status remains IDLE for the emitWatermark variant of the test
        context.emitWatermark(new Watermark(processingTimeService.getCurrentProcessingTime()));
        assertEquals(expectedOutput, output);
}
// corresponds to step (5) of scenario (please see method-level Javadoc comment)
processingTimeService.setCurrentTime(initialTime + 8 * watermarkInterval);
processingTimeService.setCurrentTime(initialTime + 3 * idleTimeout + 3 * idleTimeout / 10);
switch (testMethod) {
    case COLLECT:
        context.collect("msg");
        expectedOutput.add(new StreamRecord<>("msg", processingTimeService.getCurrentProcessingTime()));
        assertEquals(expectedOutput, output);
        break;
    case COLLECT_WITH_TIMESTAMP:
        context.collectWithTimestamp("msg", processingTimeService.getCurrentProcessingTime());
        expectedOutput.add(new StreamRecord<>("msg", processingTimeService.getCurrentProcessingTime()));
        assertEquals(expectedOutput, output);
        break;
    case EMIT_WATERMARK:
        context.emitWatermark(new Watermark(processingTimeService.getCurrentProcessingTime()));
        assertEquals(expectedOutput, output);
}
processingTimeService.setCurrentTime(initialTime + 9 * watermarkInterval);
switch (testMethod) {
    case COLLECT:
    case COLLECT_WITH_TIMESTAMP:
        expectedOutput.add(new Watermark(processingTimeService.getCurrentProcessingTime() - (processingTimeService.getCurrentProcessingTime() % watermarkInterval)));
        assertEquals(expectedOutput, output);
        break;
    case EMIT_WATERMARK:
        assertEquals(expectedOutput, output);
}
processingTimeService.setCurrentTime(initialTime + 10 * watermarkInterval);
switch (testMethod) {
    case COLLECT:
    case COLLECT_WITH_TIMESTAMP:
        expectedOutput.add(new Watermark(processingTimeService.getCurrentProcessingTime() - (processingTimeService.getCurrentProcessingTime() % watermarkInterval)));
        assertEquals(expectedOutput, output);
        break;
    case EMIT_WATERMARK:
        assertEquals(expectedOutput, output);
}
// corresponds to step (6) of scenario (please see method-level Javadoc comment)
processingTimeService.setCurrentTime(initialTime + 4 * idleTimeout + idleTimeout / 10);
assertEquals(expectedOutput, output);
// corresponds to step (7) of scenario (please see method-level Javadoc comment)
processingTimeService.setCurrentTime(initialTime + 11 * watermarkInterval);
// emit watermark does not change the previous status
if (testMethod != TestMethod.EMIT_WATERMARK) {
    expectedOutput.add(WatermarkStatus.IDLE);
}
assertEquals(expectedOutput, output);
}
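The expected watermark timestamps above always repeat the same expression: the current processing time rounded down to a multiple of the watermark interval. A small hypothetical helper (not present in the original test) that captures that repeated computation:
// hypothetical helper, not part of the original test
private static long expectedWatermark(long currentProcessingTime, long watermarkInterval) {
    // e.g. a current time of 460 with an interval of 40 yields a watermark at 440
    return currentProcessingTime - (currentProcessingTime % watermarkInterval);
}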
Use of org.apache.flink.streaming.runtime.tasks.TestProcessingTimeService in project flink by apache.
The class StreamOperatorStateHandlerTest, method testFailingBackendSnapshotMethod.
/**
* Tests that a failing snapshot method call to the keyed state backend will trigger the closing
* of the StateSnapshotContextSynchronousImpl and the cancellation of the
* OperatorSnapshotResult. The latter is supposed to also cancel all assigned futures.
*/
@Test
public void testFailingBackendSnapshotMethod() throws Exception {
final long checkpointId = 42L;
final long timestamp = 1L;
try (CloseableRegistry closeableRegistry = new CloseableRegistry()) {
    RunnableFuture<SnapshotResult<KeyedStateHandle>> keyedStateManagedFuture = new CancelableFuture<>();
    RunnableFuture<SnapshotResult<KeyedStateHandle>> keyedStateRawFuture = new CancelableFuture<>();
    RunnableFuture<SnapshotResult<OperatorStateHandle>> operatorStateManagedFuture = new CancelableFuture<>();
    RunnableFuture<SnapshotResult<OperatorStateHandle>> operatorStateRawFuture = new CancelableFuture<>();
    RunnableFuture<SnapshotResult<StateObjectCollection<InputChannelStateHandle>>> inputChannelStateFuture = new CancelableFuture<>();
    RunnableFuture<SnapshotResult<StateObjectCollection<ResultSubpartitionStateHandle>>> resultSubpartitionStateFuture = new CancelableFuture<>();
    OperatorSnapshotFutures operatorSnapshotResult = new OperatorSnapshotFutures(keyedStateManagedFuture, keyedStateRawFuture, operatorStateManagedFuture, operatorStateRawFuture, inputChannelStateFuture, resultSubpartitionStateFuture);
    StateSnapshotContextSynchronousImpl context = new TestStateSnapshotContextSynchronousImpl(checkpointId, timestamp, closeableRegistry);
    context.getRawKeyedOperatorStateOutput();
    context.getRawOperatorStateOutput();
    StreamTaskStateInitializerImpl stateInitializer = new StreamTaskStateInitializerImpl(new MockEnvironmentBuilder().build(), new MemoryStateBackend());
    StreamOperatorStateContext stateContext = stateInitializer.streamOperatorStateContext(new OperatorID(), "whatever", new TestProcessingTimeService(), new UnUsedKeyContext(), IntSerializer.INSTANCE, closeableRegistry, new InterceptingOperatorMetricGroup(), 1.0, false);
    StreamOperatorStateHandler stateHandler = new StreamOperatorStateHandler(stateContext, new ExecutionConfig(), closeableRegistry);
    final String keyedStateField = "keyedStateField";
    final String operatorStateField = "operatorStateField";
    CheckpointedStreamOperator checkpointedStreamOperator = new CheckpointedStreamOperator() {
        @Override
        public void initializeState(StateInitializationContext context) throws Exception {
            context.getKeyedStateStore().getState(new ValueStateDescriptor<>(keyedStateField, LongSerializer.INSTANCE)).update(42L);
            context.getOperatorStateStore().getListState(new ListStateDescriptor<>(operatorStateField, LongSerializer.INSTANCE)).add(42L);
        }
        @Override
        public void snapshotState(StateSnapshotContext context) throws Exception {
            throw new ExpectedTestException();
        }
    };
    stateHandler.setCurrentKey("44");
    stateHandler.initializeOperatorState(checkpointedStreamOperator);
    assertThat(stateContext.operatorStateBackend().getRegisteredStateNames(), is(not(empty())));
    assertThat(((AbstractKeyedStateBackend<?>) stateContext.keyedStateBackend()).numKeyValueStatesByName(), equalTo(1));
    try {
        stateHandler.snapshotState(checkpointedStreamOperator, Optional.of(stateContext.internalTimerServiceManager()), "42", 42, 42, CheckpointOptions.forCheckpointWithDefaultLocation(), new MemCheckpointStreamFactory(1024), operatorSnapshotResult, context, false);
        fail("Exception expected.");
    } catch (CheckpointException e) {
        // CheckpointException wraps the cause in a SerializedThrowable
        if (!ExceptionUtils.findThrowableWithMessage(e, ExpectedTestException.MESSAGE).isPresent()) {
            throw e;
        }
    }
    assertTrue(keyedStateManagedFuture.isCancelled());
    assertTrue(keyedStateRawFuture.isCancelled());
    assertTrue(context.getKeyedStateStreamFuture().isCancelled());
    assertTrue(operatorStateManagedFuture.isCancelled());
    assertTrue(operatorStateRawFuture.isCancelled());
    assertTrue(context.getOperatorStateStreamFuture().isCancelled());
    assertTrue(inputChannelStateFuture.isCancelled());
    assertTrue(resultSubpartitionStateFuture.isCancelled());
    stateHandler.dispose();
    assertThat(stateContext.operatorStateBackend().getRegisteredBroadcastStateNames(), is(empty()));
    assertThat(stateContext.operatorStateBackend().getRegisteredStateNames(), is(empty()));
    assertThat(((AbstractKeyedStateBackend<?>) stateContext.keyedStateBackend()).numKeyValueStatesByName(), equalTo(0));
}
}
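CancelableFuture is another test helper referenced above but not shown on this page. A minimal hypothetical stand-in that would satisfy the assertions is sketched below: it never completes on its own and merely records whether cancel(...) was called (java.util.concurrent imports assumed).
// hypothetical stand-in, not the actual Flink test class
private static class CancelableFuture<T> implements RunnableFuture<T> {
    private volatile boolean cancelled;
    @Override
    public void run() {
        // never completes on its own; the snapshot is expected to be cancelled instead
    }
    @Override
    public boolean cancel(boolean mayInterruptIfRunning) {
        cancelled = true;
        return true;
    }
    @Override
    public boolean isCancelled() {
        return cancelled;
    }
    @Override
    public boolean isDone() {
        return cancelled;
    }
    @Override
    public T get() {
        throw new UnsupportedOperationException("not expected to be called in this sketch");
    }
    @Override
    public T get(long timeout, TimeUnit unit) {
        throw new UnsupportedOperationException("not expected to be called in this sketch");
    }
}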