Use of org.apache.flink.streaming.connectors.kinesis.proxy.KinesisProxyV2Interface in the Apache Flink project.
Class StreamConsumerRegistrarTest, method testStreamNotFoundWhenRegisteringThrowsException.
@Test
public void testStreamNotFoundWhenRegisteringThrowsException() throws Exception {
    // Registering a consumer against a stream that does not exist must surface
    // the SDK's ResourceNotFoundException to the caller.
    thrown.expect(ResourceNotFoundException.class);

    KinesisProxyV2Interface missingStreamKinesis =
            FakeKinesisFanOutBehavioursFactory.streamNotFound();
    StreamConsumerRegistrar registrar =
            createRegistrar(missingStreamKinesis, mock(FullJitterBackoff.class));

    registrar.registerStreamConsumer(STREAM, "name");
}
Use of org.apache.flink.streaming.connectors.kinesis.proxy.KinesisProxyV2Interface in the Apache Flink project.
Class StreamConsumerRegistrarTest, method testRegistrationBackoffForLazy.
@Test
public void testRegistrationBackoffForLazy() throws Exception {
    // With LAZY registration the registrar should back off (sleep) before
    // registering, then resolve to the ARN of the already-existing consumer.
    FullJitterBackoff mockBackoff = mock(FullJitterBackoff.class);
    KinesisProxyV2Interface kinesisWithExistingConsumer =
            FakeKinesisFanOutBehavioursFactory.existingActiveConsumer();

    Properties efoProperties = createEfoProperties();
    efoProperties.setProperty(EFO_REGISTRATION_TYPE, LAZY.name());
    FanOutRecordPublisherConfiguration configuration =
            new FanOutRecordPublisherConfiguration(efoProperties, emptyList());

    StreamConsumerRegistrar registrar =
            new StreamConsumerRegistrar(kinesisWithExistingConsumer, configuration, mockBackoff);
    String consumerArn = registrar.registerStreamConsumer(STREAM, "name");

    verify(mockBackoff).sleep(anyLong());
    assertEquals(STREAM_CONSUMER_ARN_EXISTING, consumerArn);
}
Use of org.apache.flink.streaming.connectors.kinesis.proxy.KinesisProxyV2Interface in the Apache Flink project.
Class ShardConsumerFanOutTest, method testShardConsumerRetriesGenericSdkError.
@Test
public void testShardConsumerRetriesGenericSdkError() throws Exception {
    // The fake subscription throws an SdkClientException after every 5 records;
    // the shard holds 25 records in total.
    KinesisProxyV2Interface kinesis =
            FakeKinesisFanOutBehavioursFactory.errorDuringSubscription(new SdkClientException(""));

    // A generic SdkClientException is recoverable: each retry re-subscribes and
    // consumes 5 more records, so the consumer eventually reads all 25 records.
    // Renamed from "...ReadFromKinesisBeforeError" — 25 is the total read across
    // retries, not the count read before the first error.
    int expectedTotalNumberOfRecordsReadFromKinesis = 25;

    SequenceNumber startingSequenceNumber = new SequenceNumber("0");

    Properties properties = efoProperties();
    // Speed up test by reducing backoff time
    properties.setProperty(SUBSCRIBE_TO_SHARD_BACKOFF_MAX, "1");

    assertNumberOfMessagesReceivedFromKinesis(
            expectedTotalNumberOfRecordsReadFromKinesis, kinesis, startingSequenceNumber, properties);
}
Use of org.apache.flink.streaming.connectors.kinesis.proxy.KinesisProxyV2Interface in the Apache Flink project.
Class ShardConsumerFanOutTest, method testShardConsumerExitsWhenRecordPublisherIsInterrupted.
@Test
public void testShardConsumerExitsWhenRecordPublisherIsInterrupted() throws Exception {
    // The fake subscription throws after 5 records; an SdkInterruptedException is
    // treated as terminal, so the consumer stops without retrying and only the
    // first 5 records are read.
    KinesisProxyV2Interface kinesis =
            FakeKinesisFanOutBehavioursFactory.errorDuringSubscription(new SdkInterruptedException(null));

    int expectedRecordCount = 5;
    SequenceNumber startingSequenceNumber = new SequenceNumber("0");
    SequenceNumber expectedLastProcessedSequenceNumber = new SequenceNumber("5");

    ShardConsumerTestUtils.assertNumberOfMessagesReceivedFromKinesis(
            expectedRecordCount,
            new FanOutRecordPublisherFactory(kinesis),
            startingSequenceNumber,
            efoProperties(),
            expectedLastProcessedSequenceNumber);
}
Use of org.apache.flink.streaming.connectors.kinesis.proxy.KinesisProxyV2Interface in the Apache Flink project.
Class FanOutShardSubscriberTest, method testTimeoutSubscribingToShard.
@Test
public void testTimeoutSubscribingToShard() throws Exception {
    // If the subscription cannot be acquired within the configured timeout,
    // a recoverable subscriber exception with a descriptive message is expected.
    thrown.expect(FanOutShardSubscriber.RecoverableFanOutSubscriberException.class);
    thrown.expectMessage("Timed out acquiring subscription");

    KinesisProxyV2Interface unresponsiveKinesis =
            FakeKinesisFanOutBehavioursFactory.failsToAcquireSubscription();
    FanOutShardSubscriber subscriber =
            new FanOutShardSubscriber("consumerArn", "shardId", unresponsiveKinesis, Duration.ofMillis(1));

    StartingPosition startingPosition = StartingPosition.builder().build();
    // The record consumer is irrelevant here — subscription acquisition times out first.
    subscriber.subscribeToShardAndConsumeRecords(startingPosition, event -> {
    });
}
Aggregations