Use of org.apache.flink.api.connector.source.mocks.MockSourceSplitSerializer in project flink by apache.
The class SourceOperatorAlignmentTest, method testReportedWatermarkDoNotDecrease.
@Test
public void testReportedWatermarkDoNotDecrease() throws Exception {
    operator.initializeState(context.createStateContext());
    operator.open();

    MockSourceSplit split1 = new MockSourceSplit(2);
    MockSourceSplit split2 = new MockSourceSplit(3);
    int record1 = 2000;
    int record2 = 1000;
    split1.addRecord(record1);
    split2.addRecord(record2);

    operator.handleOperatorEvent(
            new AddSplitEvent<>(
                    Collections.singletonList(split1), new MockSourceSplitSerializer()));
    CollectingDataOutput<Integer> actualOutput = new CollectingDataOutput<>();
    operator.emitNext(actualOutput);
    context.getTimeService().advance(1);
    assertLatestReportedWatermarkEvent(record1);

    operator.handleOperatorEvent(
            new AddSplitEvent<>(
                    Collections.singletonList(split2), new MockSourceSplitSerializer()));
    operator.emitNext(actualOutput);
    context.getTimeService().advance(1);
    // split2 only contains an older record, so the reported watermark must not decrease.
    assertLatestReportedWatermarkEvent(record1);
}
Use of org.apache.flink.api.connector.source.mocks.MockSourceSplitSerializer in project flink by apache.
The class SourceOperatorAlignmentTest, method testWatermarkAlignment.
@Test
public void testWatermarkAlignment() throws Exception {
    operator.initializeState(context.createStateContext());
    operator.open();

    MockSourceSplit newSplit = new MockSourceSplit(2);
    int record1 = 1000;
    int record2 = 2000;
    int record3 = 3000;
    newSplit.addRecord(record1);
    newSplit.addRecord(record2);
    newSplit.addRecord(record3);

    operator.handleOperatorEvent(
            new AddSplitEvent<>(
                    Collections.singletonList(newSplit), new MockSourceSplitSerializer()));
    CollectingDataOutput<Integer> actualOutput = new CollectingDataOutput<>();
    List<Integer> expectedOutput = new ArrayList<>();

    assertThat(operator.emitNext(actualOutput), is(DataInputStatus.MORE_AVAILABLE));
    expectedOutput.add(record1);
    context.getTimeService().advance(1);
    assertLatestReportedWatermarkEvent(record1);
    assertOutput(actualOutput, expectedOutput);
    assertTrue(operator.isAvailable());

    // An alignment event below the current watermark blocks the operator.
    operator.handleOperatorEvent(new WatermarkAlignmentEvent(record1 - 1));
    assertFalse(operator.isAvailable());
    assertThat(operator.emitNext(actualOutput), is(DataInputStatus.NOTHING_AVAILABLE));
    assertLatestReportedWatermarkEvent(record1);
    assertOutput(actualOutput, expectedOutput);
    assertFalse(operator.isAvailable());

    // Raising the allowed watermark unblocks the operator again.
    operator.handleOperatorEvent(new WatermarkAlignmentEvent(record1 + 1));
    assertTrue(operator.isAvailable());

    operator.emitNext(actualOutput);
    // Try to poll a record a second time. Technically speaking, the previous emitNext call
    // could have already switched the operator status to unavailable, but that's an
    // implementation detail. However, this second call cannot emit anything, and after it
    // the operator must be unavailable.
    assertThat(operator.emitNext(actualOutput), is(DataInputStatus.NOTHING_AVAILABLE));
    expectedOutput.add(record2);
    context.getTimeService().advance(1);
    assertLatestReportedWatermarkEvent(record2);
    assertOutput(actualOutput, expectedOutput);
    assertFalse(operator.isAvailable());
}
Use of org.apache.flink.api.connector.source.mocks.MockSourceSplitSerializer in project flink by apache.
The class SourceOperatorAlignmentTest, method testWatermarkAlignmentWithIdleness.
@Test
public void testWatermarkAlignmentWithIdleness() throws Exception {
    // We use a separate context, because we need to enable idleness.
    try (SourceOperatorTestContext context =
            new SourceOperatorTestContext(
                    true,
                    WatermarkStrategy.forGenerator(
                                    ctx ->
                                            new PunctuatedGenerator(
                                                    PunctuatedGenerator.GenerationMode.ODD))
                            .withWatermarkAlignment(
                                    "group1", Duration.ofMillis(100), Duration.ofMillis(1))
                            .withTimestampAssigner((r, t) -> r))) {

        final SourceOperator<Integer, MockSourceSplit> operator = context.getOperator();
        operator.initializeState(context.createStateContext());
        operator.open();

        MockSourceSplit newSplit = new MockSourceSplit(2);
        int record1 = 1;
        newSplit.addRecord(record1);

        operator.handleOperatorEvent(
                new AddSplitEvent<>(
                        Collections.singletonList(newSplit), new MockSourceSplitSerializer()));
        CollectingDataOutput<Integer> actualOutput = new CollectingDataOutput<>();
        List<Integer> expectedOutput = new ArrayList<>();

        assertThat(operator.emitNext(actualOutput), is(DataInputStatus.MORE_AVAILABLE));
        expectedOutput.add(record1);
        context.getTimeService().advance(1);
        assertLatestReportedWatermarkEvent(context, record1);
        assertOutput(actualOutput, expectedOutput);
        assertTrue(operator.isAvailable());

        // The source becomes idle; it should report Long.MAX_VALUE as the watermark.
        assertThat(operator.emitNext(actualOutput), is(DataInputStatus.NOTHING_AVAILABLE));
        context.getTimeService().advance(1);
        assertLatestReportedWatermarkEvent(context, Long.MAX_VALUE);

        // It is easier to create a new split than to add records to the old one. The old
        // split was serialized when sending the AddSplitEvent, so it is not as simple as
        // calling newSplit.addRecord.
        newSplit = new MockSourceSplit(3);
        // Even timestamp -> no watermark (see the generator sketch after this method).
        int record2 = 2;
        newSplit.addRecord(record2);

        operator.handleOperatorEvent(
                new AddSplitEvent<>(
                        Collections.singletonList(newSplit), new MockSourceSplitSerializer()));
        assertThat(operator.emitNext(actualOutput), is(DataInputStatus.MORE_AVAILABLE));
        expectedOutput.add(record2);
        context.getTimeService().advance(1);
        // The source becomes active again and should go back to the previously emitted
        // watermark, as record2 does not produce any watermark.
        assertLatestReportedWatermarkEvent(context, record1);
        assertOutput(actualOutput, expectedOutput);
        assertTrue(operator.isAvailable());
    }
}
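The test above relies on a punctuated generator that emits watermarks only for records with odd timestamps (PunctuatedGenerator.GenerationMode.ODD). That helper lives inside the test context and is not shown on this page; below is a minimal sketch of what such a generator could look like, written against the public WatermarkGenerator API only. The class name is hypothetical and the real PunctuatedGenerator may differ in detail.

import org.apache.flink.api.common.eventtime.Watermark;
import org.apache.flink.api.common.eventtime.WatermarkGenerator;
import org.apache.flink.api.common.eventtime.WatermarkOutput;

// Illustrative only: a punctuated generator that reacts to odd timestamps.
public class OddTimestampWatermarkGenerator implements WatermarkGenerator<Integer> {

    @Override
    public void onEvent(Integer event, long eventTimestamp, WatermarkOutput output) {
        if (eventTimestamp % 2 != 0) {
            // Odd timestamp -> emit a punctuated watermark.
            output.emitWatermark(new Watermark(eventTimestamp));
        }
        // Even timestamp -> no watermark, which is what the idleness test exploits.
    }

    @Override
    public void onPeriodicEmit(WatermarkOutput output) {
        // Purely punctuated: nothing is emitted periodically.
    }
}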
Use of org.apache.flink.api.connector.source.mocks.MockSourceSplitSerializer in project flink by apache.
The class SourceOperatorTest, method testSnapshotState.
@Test
public void testSnapshotState() throws Exception {
    StateInitializationContext stateContext = context.createStateContext();
    operator.initializeState(stateContext);
    operator.open();

    MockSourceSplit newSplit = new MockSourceSplit(2);
    operator.handleOperatorEvent(
            new AddSplitEvent<>(
                    Collections.singletonList(newSplit), new MockSourceSplitSerializer()));
    operator.snapshotState(new StateSnapshotContextSynchronousImpl(100L, 100L));

    // Verify the splits in state.
    List<MockSourceSplit> splitsInState =
            CollectionUtil.iterableToList(operator.getReaderState().get());
    assertEquals(Arrays.asList(SourceOperatorTestContext.MOCK_SPLIT, newSplit), splitsInState);
}
Use of org.apache.flink.api.connector.source.mocks.MockSourceSplitSerializer in project flink by apache.
The class SourceOperatorTestContext, method createStateContext.
public StateInitializationContext createStateContext() throws Exception {
    // Serialize the mock split together with the serializer version.
    byte[] serializedSplitWithVersion =
            SimpleVersionedSerialization.writeVersionAndSerialize(
                    new MockSourceSplitSerializer(), MOCK_SPLIT);

    // Create the state context.
    OperatorStateStore operatorStateStore = createOperatorStateStore();
    StateInitializationContext stateContext =
            new StateInitializationContextImpl(null, operatorStateStore, null, null, null);

    // Put the serialized split into the list state backing SourceOperator.SPLITS_STATE_DESC.
    stateContext
            .getOperatorStateStore()
            .getListState(SourceOperator.SPLITS_STATE_DESC)
            .update(Collections.singletonList(serializedSplitWithVersion));
    return stateContext;
}
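SimpleVersionedSerialization writes the serializer's version in front of the serialized bytes, and the same serializer reads them back. As a minimal sketch, a hypothetical helper (not part of the tests above) that round-trips a split the same way createStateContext() serializes it could look like this:

// Hypothetical helper, for illustration only.
static MockSourceSplit roundTripSplit(MockSourceSplit split) throws IOException {
    MockSourceSplitSerializer serializer = new MockSourceSplitSerializer();
    // Prepends the serializer version to the serialized split bytes.
    byte[] serializedSplitWithVersion =
            SimpleVersionedSerialization.writeVersionAndSerialize(serializer, split);
    // Reads the version back and delegates to the matching deserialize(version, bytes) call.
    return SimpleVersionedSerialization.readVersionAndDeSerialize(
            serializer, serializedSplitWithVersion);
}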