Example use of org.apache.flink.streaming.connectors.kinesis.testutils.TestSourceContext in the Apache Flink project, taken from the class KinesisDataFetcherTest, method testPeriodicWatermark.
// Verifies periodic watermark generation in KinesisDataFetcher, including the
// shard-idle timeout path, using a manually advanced clock so the test is
// fully deterministic.
@Test
public void testPeriodicWatermark() {
    // Manually controlled time source; the fetcher reads it through the
    // overridden getCurrentTimeMillis() below.
    final MutableLong clock = new MutableLong();
    // Flipped to true by the source context when the fetcher marks itself idle.
    final MutableBoolean isTemporaryIdle = new MutableBoolean();
    // Collects every watermark the fetcher emits.
    final List<Watermark> watermarks = new ArrayList<>();
    String fakeStream1 = "fakeStream1";
    StreamShardHandle shardHandle =
            new StreamShardHandle(
                    fakeStream1,
                    new Shard().withShardId(KinesisShardIdGenerator.generateFromShardOrder(0)));
    // Source context that records watermarks and idle notifications instead of
    // forwarding them downstream.
    TestSourceContext<String> sourceContext =
            new TestSourceContext<String>() {

                @Override
                public void emitWatermark(Watermark mark) {
                    watermarks.add(mark);
                }

                @Override
                public void markAsTemporarilyIdle() {
                    isTemporaryIdle.setTrue();
                }
            };
    HashMap<String, String> subscribedStreamsToLastSeenShardIdsUnderTest = new HashMap<>();
    // Fetcher under test; only the clock access is overridden so the test can
    // drive time forward explicitly.
    final KinesisDataFetcher<String> fetcher =
            new TestableKinesisDataFetcher<String>(
                    singletonList(fakeStream1),
                    sourceContext,
                    new java.util.Properties(),
                    new KinesisDeserializationSchemaWrapper<>(
                            new org.apache.flink.streaming.util.serialization.SimpleStringSchema()),
                    1,
                    1,
                    new AtomicReference<>(),
                    new LinkedList<>(),
                    subscribedStreamsToLastSeenShardIdsUnderTest,
                    FakeKinesisBehavioursFactory.nonReshardedStreamsBehaviour(new HashMap<>())) {

                @Override
                protected long getCurrentTimeMillis() {
                    return clock.getValue();
                }
            };
    // NOTE(review): watermarkAssigner is presumably a field of the enclosing
    // test class (not visible in this excerpt); injected here via reflection.
    Whitebox.setInternalState(fetcher, "periodicWatermarkAssigner", watermarkAssigner);
    SequenceNumber seq = new SequenceNumber("fakeSequenceNumber");
    // register shards to subsequently emit records
    int shardIndex =
            fetcher.registerNewSubscribedShardState(
                    new KinesisStreamShardState(
                            KinesisDataFetcher.convertToStreamShardMetadata(shardHandle),
                            shardHandle,
                            seq));
    // First record carries Long.MIN_VALUE as its timestamp, so the watermark
    // cannot advance past its initial value.
    StreamRecord<String> record1 =
            new StreamRecord<>(String.valueOf(Long.MIN_VALUE), Long.MIN_VALUE);
    fetcher.emitRecordAndUpdateState(record1.getValue(), record1.getTimestamp(), shardIndex, seq);
    Assert.assertEquals(record1, sourceContext.getCollectedOutputs().poll());
    fetcher.emitWatermark();
    Assert.assertTrue("potential watermark equals previous watermark", watermarks.isEmpty());
    // A record with a real timestamp (1) should advance the watermark.
    StreamRecord<String> record2 = new StreamRecord<>(String.valueOf(1), 1);
    fetcher.emitRecordAndUpdateState(record2.getValue(), record2.getTimestamp(), shardIndex, seq);
    Assert.assertEquals(record2, sourceContext.getCollectedOutputs().poll());
    fetcher.emitWatermark();
    Assert.assertFalse("watermark advanced", watermarks.isEmpty());
    Assert.assertEquals(new Watermark(record2.getTimestamp()), watermarks.remove(0));
    Assert.assertFalse("not idle", isTemporaryIdle.booleanValue());
    // test idle timeout
    long idleTimeout = 10;
    // advance clock idleTimeout
    clock.add(idleTimeout + 1);
    fetcher.emitWatermark();
    // The idle timeout is not yet configured on the fetcher, so it must not
    // report idle even though the clock moved past the timeout.
    Assert.assertFalse("not idle", isTemporaryIdle.booleanValue());
    Assert.assertTrue("not idle, no new watermark", watermarks.isEmpty());
    // activate idle timeout
    Whitebox.setInternalState(fetcher, "shardIdleIntervalMillis", idleTimeout);
    fetcher.emitWatermark();
    // With the timeout active and the clock already advanced past it, the
    // shard is reported idle and no watermark is emitted.
    Assert.assertTrue("idle", isTemporaryIdle.booleanValue());
    Assert.assertTrue("idle, no watermark", watermarks.isEmpty());
}
Example use of org.apache.flink.streaming.connectors.kinesis.testutils.TestSourceContext in the Apache Flink project, taken from the class ShardConsumerTestUtils, method assertNumberOfMessagesReceivedFromKinesis.
/**
 * Runs a {@code ShardConsumer} against a mocked shard and asserts both the number of messages
 * that reach the source context and the sequence number recorded once consumption finishes.
 *
 * @param expectedNumberOfMessages number of records expected in the collected output
 * @param recordPublisherFactory factory creating the publisher that drives the consumer
 * @param startingSequenceNumber sequence number the shard state starts from
 * @param consumerProperties consumer configuration
 * @param expectedLastProcessedSequenceNum sequence number expected after the run
 * @param metricGroup metric group backing the reporter that is returned
 * @return the metrics reporter used by the consumer, for further assertions by the caller
 * @throws InterruptedException if interrupted while the consumer runs
 */
public static ShardConsumerMetricsReporter assertNumberOfMessagesReceivedFromKinesis(
        final int expectedNumberOfMessages,
        final RecordPublisherFactory recordPublisherFactory,
        final SequenceNumber startingSequenceNumber,
        final Properties consumerProperties,
        final SequenceNumber expectedLastProcessedSequenceNum,
        final AbstractMetricGroup metricGroup)
        throws InterruptedException {
    final ShardConsumerMetricsReporter metricsReporter =
            new ShardConsumerMetricsReporter(metricGroup);
    final StreamShardHandle mockShard = getMockStreamShard("fakeStream", 0);
    final LinkedList<KinesisStreamShardState> shardStates = new LinkedList<>();
    shardStates.add(
            new KinesisStreamShardState(
                    KinesisDataFetcher.convertToStreamShardMetadata(mockShard),
                    mockShard,
                    startingSequenceNumber));
    final TestSourceContext<String> collectingContext = new TestSourceContext<>();
    final KinesisDeserializationSchemaWrapper<String> schemaWrapper =
            new KinesisDeserializationSchemaWrapper<>(new SimpleStringSchema());
    final TestableKinesisDataFetcher<String> testFetcher =
            new TestableKinesisDataFetcher<>(
                    Collections.singletonList("fakeStream"),
                    collectingContext,
                    consumerProperties,
                    schemaWrapper,
                    10,
                    2,
                    new AtomicReference<>(),
                    shardStates,
                    KinesisDataFetcher.createInitialSubscribedStreamsToLastDiscoveredShardsState(
                            Collections.singletonList("fakeStream")),
                    Mockito.mock(KinesisProxyInterface.class),
                    Mockito.mock(KinesisProxyV2Interface.class));
    final StreamShardHandle handle = shardStates.get(0).getStreamShardHandle();
    final SequenceNumber lastSequenceNum = shardStates.get(0).getLastProcessedSequenceNum();
    final StartingPosition position =
            AWSUtil.getStartingPosition(lastSequenceNum, consumerProperties);
    final RecordPublisher publisher =
            recordPublisherFactory.create(
                    position, testFetcher.getConsumerConfiguration(), metricGroup, handle);
    final int registeredShardIndex = testFetcher.registerNewSubscribedShardState(shardStates.get(0));
    // Run the consumer synchronously on the current thread.
    new ShardConsumer<>(
                    testFetcher,
                    publisher,
                    registeredShardIndex,
                    handle,
                    lastSequenceNum,
                    metricsReporter,
                    schemaWrapper)
            .run();
    assertEquals(expectedNumberOfMessages, collectingContext.getCollectedOutputs().size());
    assertEquals(
            expectedLastProcessedSequenceNum, shardStates.get(0).getLastProcessedSequenceNum());
    return metricsReporter;
}
Example use of org.apache.flink.streaming.connectors.kinesis.testutils.TestSourceContext in the Apache Flink project, taken from the class KinesisDataFetcherTest, method testCancelDuringDiscovery.
/**
 * Verifies that cancelling the consumer while shard discovery is sleeping (the discovery
 * interval is set extremely high) still lets the consumer thread shut down cleanly.
 */
@Test
public void testCancelDuringDiscovery() throws Exception {
    final String stream = "fakeStream";
    final int numShards = 3;
    final Properties config = TestUtils.getStandardProperties();
    // Make the discovery sleep effectively infinite so cancel() lands inside it.
    config.setProperty(SHARD_DISCOVERY_INTERVAL_MILLIS, "10000000");
    final LinkedList<KinesisStreamShardState> shardStates = new LinkedList<>();
    final TestSourceContext<String> collectingContext = new TestSourceContext<>();
    final TestableKinesisDataFetcher<String> testFetcher =
            new TestableKinesisDataFetcher<String>(
                    singletonList(stream),
                    collectingContext,
                    config,
                    new KinesisDeserializationSchemaWrapper<>(new SimpleStringSchema()),
                    1,
                    0,
                    new AtomicReference<>(),
                    shardStates,
                    new HashMap<>(),
                    FakeKinesisBehavioursFactory.nonReshardedStreamsBehaviour(
                            Collections.singletonMap(stream, numShards)));
    // FlinkKinesisConsumer is responsible for setting up the fetcher before it can be run;
    // run the consumer until it reaches the point where the fetcher starts to run
    final DummyFlinkKinesisConsumer<String> dummyConsumer =
            new DummyFlinkKinesisConsumer<>(TestUtils.getStandardProperties(), testFetcher, 1, 0);
    final CheckedThread runner =
            new CheckedThread() {
                @Override
                public void go() throws Exception {
                    dummyConsumer.run(new TestSourceContext<>());
                }
            };
    runner.start();
    // wait for the second discovery to be triggered, that has a high probability to be inside
    // discovery sleep (10k s)
    testFetcher.waitUntilDiscovery(2);
    Thread.sleep(1000);
    dummyConsumer.cancel();
    // Propagates any exception raised on the consumer thread.
    runner.sync();
}
Example use of org.apache.flink.streaming.connectors.kinesis.testutils.TestSourceContext in the Apache Flink project, taken from the class KinesisDataFetcherTest, method testShardToSubtaskMappingWithCustomHashFunction.
// ----------------------------------------------------------------------
//  Tests shard distribution with custom hash function
// ----------------------------------------------------------------------

/**
 * Verifies that a custom {@code KinesisShardAssigner} fully controls shard-to-subtask
 * mapping: with an assigner that pins every shard to a single subtask index, only that
 * subtask discovers shards and every other subtask discovers none.
 */
@Test
public void testShardToSubtaskMappingWithCustomHashFunction() throws Exception {
    int totalCountOfSubtasks = 10;
    int shardCount = 3;
    for (int i = 0; i < 2; i++) {
        final int hash = i;
        // Assigner that routes every shard to the single subtask index 'hash'.
        final KinesisShardAssigner allShardsSingleSubtaskFn = (shard, subtasks) -> hash;
        Map<String, Integer> streamToShardCount = new HashMap<>();
        List<String> fakeStreams = new LinkedList<>();
        fakeStreams.add("fakeStream");
        streamToShardCount.put("fakeStream", shardCount);
        for (int j = 0; j < totalCountOfSubtasks; j++) {
            int subtaskIndex = j;
            // subscribe with default hashing
            // FIX: parameterized the previously raw TestableKinesisDataFetcher so the
            // List<StreamShardHandle> assignment below is no longer an unchecked
            // conversion (consistent with the other tests in this class).
            final TestableKinesisDataFetcher<String> fetcher =
                    new TestableKinesisDataFetcher<>(
                            fakeStreams,
                            new TestSourceContext<>(),
                            new Properties(),
                            new KinesisDeserializationSchemaWrapper<>(new SimpleStringSchema()),
                            totalCountOfSubtasks,
                            subtaskIndex,
                            new AtomicReference<>(),
                            new LinkedList<>(),
                            KinesisDataFetcher.createInitialSubscribedStreamsToLastDiscoveredShardsState(
                                    fakeStreams),
                            FakeKinesisBehavioursFactory.nonReshardedStreamsBehaviour(
                                    streamToShardCount));
            // Override hashing via reflection.
            Whitebox.setInternalState(fetcher, "shardAssigner", allShardsSingleSubtaskFn);
            List<StreamShardHandle> shards = fetcher.discoverNewShardsToSubscribe();
            fetcher.shutdownFetcher();
            String msg = String.format("for hash=%d, subtask=%d", hash, subtaskIndex);
            // Only the subtask matching the pinned hash should receive all shards.
            if (j == i) {
                assertEquals(msg, shardCount, shards.size());
            } else {
                assertEquals(msg, 0, shards.size());
            }
        }
    }
}
Example use of org.apache.flink.streaming.connectors.kinesis.testutils.TestSourceContext in the Apache Flink project, taken from the class KinesisDataFetcherTest, method testSkipCorruptedRecord.
/**
 * Verifies that a corrupted record (deserialized to {@code null}) is skipped — no output is
 * emitted — while the shard's sequence-number state is still advanced.
 */
@Test
public void testSkipCorruptedRecord() throws Exception {
    final String stream = "fakeStream";
    final int numShards = 3;
    final LinkedList<KinesisStreamShardState> shardStates = new LinkedList<>();
    final TestSourceContext<String> collectingContext = new TestSourceContext<>();
    final TestableKinesisDataFetcher<String> testFetcher =
            new TestableKinesisDataFetcher<>(
                    singletonList(stream),
                    collectingContext,
                    TestUtils.getStandardProperties(),
                    new KinesisDeserializationSchemaWrapper<>(new SimpleStringSchema()),
                    1,
                    0,
                    new AtomicReference<>(),
                    shardStates,
                    new HashMap<>(),
                    FakeKinesisBehavioursFactory.nonReshardedStreamsBehaviour(
                            Collections.singletonMap(stream, numShards)));
    // FlinkKinesisConsumer is responsible for setting up the fetcher before it can be run;
    // run the consumer until it reaches the point where the fetcher starts to run
    final DummyFlinkKinesisConsumer<String> dummyConsumer =
            new DummyFlinkKinesisConsumer<>(TestUtils.getStandardProperties(), testFetcher, 1, 0);
    final CheckedThread runner =
            new CheckedThread() {
                @Override
                public void go() throws Exception {
                    dummyConsumer.run(new TestSourceContext<>());
                }
            };
    runner.start();
    testFetcher.waitUntilRun();
    dummyConsumer.cancel();
    runner.sync();
    assertEquals(numShards, shardStates.size());
    // A well-formed record per shard: output is collected and state is advanced.
    for (int shardIdx = 0; shardIdx < numShards; shardIdx++) {
        testFetcher.emitRecordAndUpdateState(
                "record-" + shardIdx, 10L, shardIdx, new SequenceNumber("seq-num-1"));
        assertEquals(
                new SequenceNumber("seq-num-1"),
                shardStates.get(shardIdx).getLastProcessedSequenceNum());
        assertEquals(
                new StreamRecord<>("record-" + shardIdx, 10L),
                collectingContext.removeLatestOutput());
    }
    // emitting a null (i.e., a corrupt record) should not produce any output, but still have
    // the shard state updated
    testFetcher.emitRecordAndUpdateState(null, 10L, 1, new SequenceNumber("seq-num-2"));
    assertEquals(
            new SequenceNumber("seq-num-2"), shardStates.get(1).getLastProcessedSequenceNum());
    // no output should have been collected
    assertNull(collectingContext.removeLatestOutput());
}
Aggregations