Use of org.apache.flink.core.testutils.CheckedThread in project flink by apache.
From the class KinesisDataFetcherTest, method testStreamToLastSeenShardStateIsCorrectlySetWhenNotRestoringFromFailure.
@Test
public void testStreamToLastSeenShardStateIsCorrectlySetWhenNotRestoringFromFailure() throws Exception {
    List<String> fakeStreams = new LinkedList<>();
    fakeStreams.add("fakeStream1");
    fakeStreams.add("fakeStream2");
    fakeStreams.add("fakeStream3");
    fakeStreams.add("fakeStream4");
    HashMap<String, String> subscribedStreamsToLastSeenShardIdsUnderTest =
            KinesisDataFetcher.createInitialSubscribedStreamsToLastDiscoveredShardsState(fakeStreams);
    Map<String, Integer> streamToShardCount = new HashMap<>();
    Random rand = new Random();
    for (String fakeStream : fakeStreams) {
        streamToShardCount.put(fakeStream, rand.nextInt(5) + 1);
    }
    final TestableKinesisDataFetcher<String> fetcher =
            new TestableKinesisDataFetcher<>(
                    fakeStreams,
                    new TestSourceContext<>(),
                    TestUtils.getStandardProperties(),
                    new KinesisDeserializationSchemaWrapper<>(new SimpleStringSchema()),
                    10,
                    2,
                    new AtomicReference<>(),
                    new LinkedList<>(),
                    subscribedStreamsToLastSeenShardIdsUnderTest,
                    FakeKinesisBehavioursFactory.nonReshardedStreamsBehaviour(streamToShardCount));
    final DummyFlinkKinesisConsumer<String> consumer =
            new DummyFlinkKinesisConsumer<>(TestUtils.getStandardProperties(), fetcher, 1, 0);
    CheckedThread consumerThread = new CheckedThread() {
        @Override
        public void go() throws Exception {
            consumer.run(new TestSourceContext<>());
        }
    };
    consumerThread.start();
    fetcher.waitUntilRun();
    consumer.cancel();
    consumerThread.sync();
    // assert that the streams tracked in the state are identical to the subscribed streams
    Set<String> streamsInState = subscribedStreamsToLastSeenShardIdsUnderTest.keySet();
    assertEquals(fakeStreams.size(), streamsInState.size());
    assertTrue(streamsInState.containsAll(fakeStreams));
    // assert that the last seen shard for each stream is correctly set in the state
    for (Map.Entry<String, String> streamToLastSeenShard : subscribedStreamsToLastSeenShardIdsUnderTest.entrySet()) {
        assertEquals(
                KinesisShardIdGenerator.generateFromShardOrder(streamToShardCount.get(streamToLastSeenShard.getKey()) - 1),
                streamToLastSeenShard.getValue());
    }
}
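All of the examples on this page follow the same idiom: the work runs in a background thread whose go() body may throw, and sync() joins the thread and rethrows any captured exception on the calling test thread, so failures in the spawned thread fail the test. A minimal sketch of that idiom (a hypothetical re-implementation for illustration only, not the actual org.apache.flink.core.testutils.CheckedThread source, which also offers a named constructor and a timed sync):

// Hypothetical sketch of the CheckedThread idiom, not the real Flink class.
public abstract class CheckedThreadSketch extends Thread {
    private volatile Throwable error;

    /** The work to run; any exception is captured instead of being lost. */
    public abstract void go() throws Exception;

    @Override
    public final void run() {
        try {
            go();
        } catch (Throwable t) {
            error = t; // remember the failure for the joining thread
        }
    }

    /** Waits for the thread to finish, then rethrows any captured exception. */
    public void sync() throws Exception {
        join();
        if (error instanceof Exception) {
            throw (Exception) error;
        } else if (error != null) {
            throw new Error(error);
        }
    }
}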
Use of org.apache.flink.core.testutils.CheckedThread in project flink by apache.
From the class KinesisDataFetcherTest, method testSkipCorruptedRecord.
@Test
public void testSkipCorruptedRecord() throws Exception {
    final String stream = "fakeStream";
    final int numShards = 3;
    final LinkedList<KinesisStreamShardState> testShardStates = new LinkedList<>();
    final TestSourceContext<String> sourceContext = new TestSourceContext<>();
    final TestableKinesisDataFetcher<String> fetcher =
            new TestableKinesisDataFetcher<>(
                    singletonList(stream),
                    sourceContext,
                    TestUtils.getStandardProperties(),
                    new KinesisDeserializationSchemaWrapper<>(new SimpleStringSchema()),
                    1,
                    0,
                    new AtomicReference<>(),
                    testShardStates,
                    new HashMap<>(),
                    FakeKinesisBehavioursFactory.nonReshardedStreamsBehaviour(Collections.singletonMap(stream, numShards)));
    // FlinkKinesisConsumer is responsible for setting up the fetcher before it can be run;
    // run the consumer until it reaches the point where the fetcher starts to run
    final DummyFlinkKinesisConsumer<String> consumer =
            new DummyFlinkKinesisConsumer<>(TestUtils.getStandardProperties(), fetcher, 1, 0);
    CheckedThread consumerThread = new CheckedThread() {
        @Override
        public void go() throws Exception {
            consumer.run(new TestSourceContext<>());
        }
    };
    consumerThread.start();
    fetcher.waitUntilRun();
    consumer.cancel();
    consumerThread.sync();
    assertEquals(numShards, testShardStates.size());
    for (int i = 0; i < numShards; i++) {
        fetcher.emitRecordAndUpdateState("record-" + i, 10L, i, new SequenceNumber("seq-num-1"));
        assertEquals(new SequenceNumber("seq-num-1"), testShardStates.get(i).getLastProcessedSequenceNum());
        assertEquals(new StreamRecord<>("record-" + i, 10L), sourceContext.removeLatestOutput());
    }
    // emitting a null (i.e., a corrupt record) should not produce any output, but still have
    // the shard state updated
    fetcher.emitRecordAndUpdateState(null, 10L, 1, new SequenceNumber("seq-num-2"));
    assertEquals(new SequenceNumber("seq-num-2"), testShardStates.get(1).getLastProcessedSequenceNum());
    // no output should have been collected
    assertNull(sourceContext.removeLatestOutput());
}
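The contract this test pins down can be sketched as follows. This is a simplified, hypothetical fragment, not the real KinesisDataFetcher.emitRecordAndUpdateState (which also deals with watermarks and checkpoint locking); sourceContext and subscribedShardsState stand in for the fetcher's internal fields:

// Hypothetical sketch of the null-skipping contract the test verifies: a
// corrupt record (deserialized to null) emits nothing downstream, but the
// shard state still advances past its sequence number so that the corrupt
// record is not re-read after a restore.
void emitRecordAndUpdateState(
        String record, long timestamp, int shardStateIndex, SequenceNumber lastSequenceNumber) {
    if (record != null) {
        sourceContext.collectWithTimestamp(record, timestamp); // only valid records are emitted
    }
    // the state update happens unconditionally, even for skipped records
    subscribedShardsState.get(shardStateIndex).setLastProcessedSequenceNum(lastSequenceNumber);
}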
Use of org.apache.flink.core.testutils.CheckedThread in project flink by apache.
From the class FlinkKafkaConsumerBaseTest, method testSnapshotStateWithCommitOnCheckpointsEnabled.
@Test
@SuppressWarnings("unchecked")
public void testSnapshotStateWithCommitOnCheckpointsEnabled() throws Exception {
    // --------------------------------------------------------------------
    // prepare fake states
    // --------------------------------------------------------------------
    final HashMap<KafkaTopicPartition, Long> state1 = new HashMap<>();
    state1.put(new KafkaTopicPartition("abc", 13), 16768L);
    state1.put(new KafkaTopicPartition("def", 7), 987654321L);
    final HashMap<KafkaTopicPartition, Long> state2 = new HashMap<>();
    state2.put(new KafkaTopicPartition("abc", 13), 16770L);
    state2.put(new KafkaTopicPartition("def", 7), 987654329L);
    final HashMap<KafkaTopicPartition, Long> state3 = new HashMap<>();
    state3.put(new KafkaTopicPartition("abc", 13), 16780L);
    state3.put(new KafkaTopicPartition("def", 7), 987654377L);
    // --------------------------------------------------------------------
    final MockFetcher<String> fetcher = new MockFetcher<>(state1, state2, state3);
    final FlinkKafkaConsumerBase<String> consumer =
            new DummyFlinkKafkaConsumer<>(fetcher, mock(AbstractPartitionDiscoverer.class), false);
    final TestingListState<Serializable> listState = new TestingListState<>();
    // setup and run the consumer; wait until the consumer reaches the main fetch loop before
    // continuing test
    setupConsumer(consumer, false, listState, true, 0, 1);
    final CheckedThread runThread = new CheckedThread() {
        @Override
        public void go() throws Exception {
            consumer.run(new TestSourceContext<>());
        }
    };
    runThread.start();
    fetcher.waitUntilRun();
    assertEquals(0, consumer.getPendingOffsetsToCommit().size());
    // checkpoint 1
    consumer.snapshotState(new StateSnapshotContextSynchronousImpl(138, 138));
    HashMap<KafkaTopicPartition, Long> snapshot1 = new HashMap<>();
    for (Serializable serializable : listState.get()) {
        Tuple2<KafkaTopicPartition, Long> kafkaTopicPartitionLongTuple2 = (Tuple2<KafkaTopicPartition, Long>) serializable;
        snapshot1.put(kafkaTopicPartitionLongTuple2.f0, kafkaTopicPartitionLongTuple2.f1);
    }
    assertEquals(state1, snapshot1);
    assertEquals(1, consumer.getPendingOffsetsToCommit().size());
    assertEquals(state1, consumer.getPendingOffsetsToCommit().get(138L));
    // checkpoint 2
    consumer.snapshotState(new StateSnapshotContextSynchronousImpl(140, 140));
    HashMap<KafkaTopicPartition, Long> snapshot2 = new HashMap<>();
    for (Serializable serializable : listState.get()) {
        Tuple2<KafkaTopicPartition, Long> kafkaTopicPartitionLongTuple2 = (Tuple2<KafkaTopicPartition, Long>) serializable;
        snapshot2.put(kafkaTopicPartitionLongTuple2.f0, kafkaTopicPartitionLongTuple2.f1);
    }
    assertEquals(state2, snapshot2);
    assertEquals(2, consumer.getPendingOffsetsToCommit().size());
    assertEquals(state2, consumer.getPendingOffsetsToCommit().get(140L));
    // ack checkpoint 1
    consumer.notifyCheckpointComplete(138L);
    assertEquals(1, consumer.getPendingOffsetsToCommit().size());
    assertTrue(consumer.getPendingOffsetsToCommit().containsKey(140L));
    assertEquals(state1, fetcher.getAndClearLastCommittedOffsets());
    assertEquals(1, fetcher.getCommitCount());
    // checkpoint 3
    consumer.snapshotState(new StateSnapshotContextSynchronousImpl(141, 141));
    HashMap<KafkaTopicPartition, Long> snapshot3 = new HashMap<>();
    for (Serializable serializable : listState.get()) {
        Tuple2<KafkaTopicPartition, Long> kafkaTopicPartitionLongTuple2 = (Tuple2<KafkaTopicPartition, Long>) serializable;
        snapshot3.put(kafkaTopicPartitionLongTuple2.f0, kafkaTopicPartitionLongTuple2.f1);
    }
    assertEquals(state3, snapshot3);
    assertEquals(2, consumer.getPendingOffsetsToCommit().size());
    assertEquals(state3, consumer.getPendingOffsetsToCommit().get(141L));
    // ack checkpoint 3, subsumes number 2
    consumer.notifyCheckpointComplete(141L);
    assertEquals(0, consumer.getPendingOffsetsToCommit().size());
    assertEquals(state3, fetcher.getAndClearLastCommittedOffsets());
    assertEquals(2, fetcher.getCommitCount());
    // invalid checkpoint
    consumer.notifyCheckpointComplete(666);
    assertEquals(0, consumer.getPendingOffsetsToCommit().size());
    assertNull(fetcher.getAndClearLastCommittedOffsets());
    assertEquals(2, fetcher.getCommitCount());
    consumer.cancel();
    runThread.sync();
}
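The three snapshot-extraction loops in this test are identical; in a test of one's own they could be factored into a small helper. A sketch, assuming the same TestingListState stub and Tuple2 element type used above:

// Sketch of a helper that rebuilds the partition -> offset map from the
// serialized list state, replacing the three copy-pasted loops above.
@SuppressWarnings("unchecked")
private static HashMap<KafkaTopicPartition, Long> extractSnapshot(
        TestingListState<Serializable> listState) throws Exception {
    HashMap<KafkaTopicPartition, Long> snapshot = new HashMap<>();
    for (Serializable serializable : listState.get()) {
        Tuple2<KafkaTopicPartition, Long> partitionAndOffset =
                (Tuple2<KafkaTopicPartition, Long>) serializable;
        snapshot.put(partitionAndOffset.f0, partitionAndOffset.f1);
    }
    return snapshot;
}

With such a helper, each checkpoint step collapses to a single assertion, e.g. assertEquals(state1, extractSnapshot(listState)).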
Use of org.apache.flink.core.testutils.CheckedThread in project flink by apache.
From the class FlinkKafkaConsumerBaseTest, method testSnapshotStateWithCommitOnCheckpointsDisabled.
@Test
@SuppressWarnings("unchecked")
public void testSnapshotStateWithCommitOnCheckpointsDisabled() throws Exception {
    // --------------------------------------------------------------------
    // prepare fake states
    // --------------------------------------------------------------------
    final HashMap<KafkaTopicPartition, Long> state1 = new HashMap<>();
    state1.put(new KafkaTopicPartition("abc", 13), 16768L);
    state1.put(new KafkaTopicPartition("def", 7), 987654321L);
    final HashMap<KafkaTopicPartition, Long> state2 = new HashMap<>();
    state2.put(new KafkaTopicPartition("abc", 13), 16770L);
    state2.put(new KafkaTopicPartition("def", 7), 987654329L);
    final HashMap<KafkaTopicPartition, Long> state3 = new HashMap<>();
    state3.put(new KafkaTopicPartition("abc", 13), 16780L);
    state3.put(new KafkaTopicPartition("def", 7), 987654377L);
    // --------------------------------------------------------------------
    final MockFetcher<String> fetcher = new MockFetcher<>(state1, state2, state3);
    final FlinkKafkaConsumerBase<String> consumer =
            new DummyFlinkKafkaConsumer<>(fetcher, mock(AbstractPartitionDiscoverer.class), false);
    // disable offset committing
    consumer.setCommitOffsetsOnCheckpoints(false);
    final TestingListState<Serializable> listState = new TestingListState<>();
    // setup and run the consumer; wait until the consumer reaches the main fetch loop before
    // continuing test
    setupConsumer(consumer, false, listState, true, 0, 1);
    final CheckedThread runThread = new CheckedThread() {
        @Override
        public void go() throws Exception {
            consumer.run(new TestSourceContext<>());
        }
    };
    runThread.start();
    fetcher.waitUntilRun();
    assertEquals(0, consumer.getPendingOffsetsToCommit().size());
    // checkpoint 1
    consumer.snapshotState(new StateSnapshotContextSynchronousImpl(138, 138));
    HashMap<KafkaTopicPartition, Long> snapshot1 = new HashMap<>();
    for (Serializable serializable : listState.get()) {
        Tuple2<KafkaTopicPartition, Long> kafkaTopicPartitionLongTuple2 = (Tuple2<KafkaTopicPartition, Long>) serializable;
        snapshot1.put(kafkaTopicPartitionLongTuple2.f0, kafkaTopicPartitionLongTuple2.f1);
    }
    assertEquals(state1, snapshot1);
    assertEquals(0, consumer.getPendingOffsetsToCommit().size());
    // checkpoint 2
    consumer.snapshotState(new StateSnapshotContextSynchronousImpl(140, 140));
    HashMap<KafkaTopicPartition, Long> snapshot2 = new HashMap<>();
    for (Serializable serializable : listState.get()) {
        Tuple2<KafkaTopicPartition, Long> kafkaTopicPartitionLongTuple2 = (Tuple2<KafkaTopicPartition, Long>) serializable;
        snapshot2.put(kafkaTopicPartitionLongTuple2.f0, kafkaTopicPartitionLongTuple2.f1);
    }
    assertEquals(state2, snapshot2);
    assertEquals(0, consumer.getPendingOffsetsToCommit().size());
    // ack checkpoint 1
    consumer.notifyCheckpointComplete(138L);
    assertEquals(0, fetcher.getCommitCount());
    // no offsets should be committed
    assertNull(fetcher.getAndClearLastCommittedOffsets());
    // checkpoint 3
    consumer.snapshotState(new StateSnapshotContextSynchronousImpl(141, 141));
    HashMap<KafkaTopicPartition, Long> snapshot3 = new HashMap<>();
    for (Serializable serializable : listState.get()) {
        Tuple2<KafkaTopicPartition, Long> kafkaTopicPartitionLongTuple2 = (Tuple2<KafkaTopicPartition, Long>) serializable;
        snapshot3.put(kafkaTopicPartitionLongTuple2.f0, kafkaTopicPartitionLongTuple2.f1);
    }
    assertEquals(state3, snapshot3);
    assertEquals(0, consumer.getPendingOffsetsToCommit().size());
    // ack checkpoint 3, subsumes number 2
    consumer.notifyCheckpointComplete(141L);
    assertEquals(0, fetcher.getCommitCount());
    // no offsets should be committed
    assertNull(fetcher.getAndClearLastCommittedOffsets());
    // invalid checkpoint
    consumer.notifyCheckpointComplete(666);
    assertEquals(0, fetcher.getCommitCount());
    // no offsets should be committed
    assertNull(fetcher.getAndClearLastCommittedOffsets());
    consumer.cancel();
    runThread.sync();
}
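Taken together, the enabled/disabled pair pins down how offset commits are gated. A simplified sketch of that gating follows; the class and method names are illustrative (the real FlinkKafkaConsumerBase resolves this through an OffsetCommitMode and a list of pending checkpoints), and KafkaTopicPartition, Map, and TreeMap are assumed imported as in the tests above:

// Hypothetical sketch of the commit gating the two tests above verify,
// not the actual FlinkKafkaConsumerBase implementation.
class OffsetCommitGate {
    private final TreeMap<Long, Map<KafkaTopicPartition, Long>> pendingOffsetsToCommit = new TreeMap<>();
    private final boolean commitOffsetsOnCheckpoints;

    OffsetCommitGate(boolean commitOffsetsOnCheckpoints) {
        this.commitOffsetsOnCheckpoints = commitOffsetsOnCheckpoints;
    }

    /** Called on snapshotState: offsets always go into operator state for restore,
     *  but are only remembered for committing when the commit mode allows it. */
    void onSnapshot(long checkpointId, Map<KafkaTopicPartition, Long> currentOffsets) {
        if (commitOffsetsOnCheckpoints) {
            pendingOffsetsToCommit.put(checkpointId, currentOffsets);
        }
    }

    /** Called on notifyCheckpointComplete: returns the offsets to hand to the
     *  fetcher for committing, or null when nothing should be committed. */
    Map<KafkaTopicPartition, Long> onCheckpointComplete(long checkpointId) {
        if (!commitOffsetsOnCheckpoints) {
            return null; // disabled: the fetcher's commit count stays at zero
        }
        Map<KafkaTopicPartition, Long> offsets = pendingOffsetsToCommit.remove(checkpointId);
        // older checkpoints are subsumed by the acknowledged one and dropped,
        // which is why acking checkpoint 141 above also clears checkpoint 140
        pendingOffsetsToCommit.headMap(checkpointId).clear();
        return offsets; // null for unknown checkpoint ids such as 666 above
    }
}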
Use of org.apache.flink.core.testutils.CheckedThread in project flink by apache.
From the class KryoSerializerConcurrencyTest, method testConcurrentUseOfSerializer.
@Test
public void testConcurrentUseOfSerializer() throws Exception {
    final KryoSerializer<String> serializer = new KryoSerializer<>(String.class, new ExecutionConfig());
    final BlockerSync sync = new BlockerSync();
    final DataOutputView regularOut = new DataOutputSerializer(32);
    final DataOutputView lockingOut = new LockingView(sync);
    // this thread serializes and gets stuck there
    final CheckedThread thread = new CheckedThread("serializer") {
        @Override
        public void go() throws Exception {
            serializer.serialize("a value", lockingOut);
        }
    };
    thread.start();
    sync.awaitBlocker();
    // this should fail with an exception
    try {
        serializer.serialize("value", regularOut);
        fail("should have failed with an exception");
    } catch (IllegalStateException e) {
        // expected
    } finally {
        // release the thread that serializes
        sync.releaseBlocker();
    }
    // this propagates exceptions from the spawned thread
    thread.sync();
}
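The LockingView referenced above is a test-local DataOutputView whose write path parks the calling thread on the BlockerSync, which is what lets the test hold the serializer mid-serialization before provoking the concurrent use. A sketch of that idea, assuming BlockerSync's awaitBlocker/blockNonInterruptible/releaseBlocker protocol; the real test's implementation may differ in detail:

// Hypothetical sketch of LockingView: a DataOutputView that signals the test
// thread on first write, then blocks until the test releases it.
private static class LockingView extends DataOutputSerializer {
    private final BlockerSync blocker;

    LockingView(BlockerSync blocker) {
        super(32); // small initial buffer, matching the regular output above
        this.blocker = blocker;
    }

    @Override
    public void write(int b) {
        // wakes the test thread's awaitBlocker(), then waits for releaseBlocker()
        blocker.blockNonInterruptible();
    }

    @Override
    public void write(byte[] b, int off, int len) {
        blocker.blockNonInterruptible();
    }
}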