Use of org.apache.flink.streaming.runtime.tasks.TestProcessingTimeService in project flink by apache.
From class TopicMetadataListenerTest, method fetchTopicPartitionInformation.
@Test
void fetchTopicPartitionInformation() {
    String topic = randomAlphabetic(10);
    operator().createTopic(topic, 8);
    TopicMetadataListener listener = new TopicMetadataListener(singletonList(topic));
    SinkConfiguration configuration = sinkConfiguration(Duration.ofMinutes(10).toMillis());
    TestProcessingTimeService timeService = new TestProcessingTimeService();

    List<String> topics = listener.availableTopics();
    assertThat(topics).isEmpty();

    listener.open(configuration, timeService);
    topics = listener.availableTopics();
    List<String> desiredTopics = topicPartitions(topic, 8);
    assertThat(topics).hasSize(8).isEqualTo(desiredTopics);
}
Use of org.apache.flink.streaming.runtime.tasks.TestProcessingTimeService in project flink by apache.
From class TopicMetadataListenerTest, method listenOnPartitions.
@Test
void listenOnPartitions() throws Exception {
    String topic = randomAlphabetic(10);
    operator().createTopic(topic, 6);
    List<String> partitions = topicPartitions(topic, 6);

    TopicMetadataListener listener = new TopicMetadataListener(partitions);
    long interval = Duration.ofMinutes(15).toMillis();
    SinkConfiguration configuration = sinkConfiguration(interval);
    TestProcessingTimeService timeService = new TestProcessingTimeService();

    List<String> topics = listener.availableTopics();
    assertEquals(partitions, topics);

    listener.open(configuration, timeService);
    topics = listener.availableTopics();
    assertEquals(partitions, topics);

    // The listener was built from explicit partition names, so growing the topic and
    // advancing the clock past the refresh interval must not change the available topics.
    operator().increaseTopicPartitions(topic, 12);
    timeService.advance(interval);
    topics = listener.availableTopics();
    assertEquals(partitions, topics);
}
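For orientation, here is a minimal standalone sketch of the manual clock behaviour the tests on this page rely on. It is illustrative only: the method name manualClockSketch and the firedAt list are made up, and the setCurrentTime/advance/registerTimer calls are assumed to match the signatures of the Flink test utility, with the same JUnit and AssertJ static imports as the surrounding tests.

@Test
void manualClockSketch() throws Exception {
    TestProcessingTimeService timeService = new TestProcessingTimeService();
    timeService.setCurrentTime(0L);

    // Register a one-shot callback due at t = 100.
    List<Long> firedAt = new ArrayList<>();
    timeService.registerTimer(100L, firedAt::add);

    timeService.setCurrentTime(99L); // not yet due, nothing fires
    timeService.advance(1L);         // the clock reaches 100 and the callback fires
    assertThat(firedAt).containsExactly(100L);
}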
Use of org.apache.flink.streaming.runtime.tasks.TestProcessingTimeService in project flink by apache.
From class AsyncSinkWriterTest, method testThatTimeBasedBatchPicksUpAllRelevantItemsUpUntilExpiryOfTimer.
@Test
public void testThatTimeBasedBatchPicksUpAllRelevantItemsUpUntilExpiryOfTimer() throws Exception {
    AsyncSinkWriterImpl sink =
            new AsyncSinkWriterImplBuilder()
                    .context(sinkInitContext)
                    .maxBatchSize(10)
                    .maxInFlightRequests(20)
                    .maxBatchSizeInBytes(10_000)
                    .maxTimeInBufferMS(100)
                    .maxRecordSizeInBytes(10_000)
                    .simulateFailures(true)
                    .build();
    TestProcessingTimeService tpts = sinkInitContext.getTestProcessingTimeService();

    // Write 98 records at t = 0..97; every 10th write triggers a size-based flush.
    for (int i = 0; i < 98; i++) {
        tpts.setCurrentTime(i);
        sink.write(String.valueOf(i));
    }

    // 9 full batches (90 records) have been flushed by size; the last 8 are still buffered.
    tpts.setCurrentTime(99L);
    assertEquals(90, res.size());

    // Reaching t = 100 expires the buffer timer (maxTimeInBufferMS) and flushes the remainder.
    tpts.setCurrentTime(100L);
    assertEquals(98, res.size());
}
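A quick worked check of the counts asserted above, assuming only the maxBatchSize of 10 and the 98 writes configured in the test; the variable names are made up for the sketch.

// Illustrative arithmetic for the assertions above.
int written = 98;                                      // records written at t = 0..97
int batchSize = 10;                                    // maxBatchSize
int flushedBySize = (written / batchSize) * batchSize; // 9 full batches = 90, res.size() at t = 99
int flushedOnTimer = written - flushedBySize;          // remaining 8 flush when the buffer timer fires at t = 100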
Use of org.apache.flink.streaming.runtime.tasks.TestProcessingTimeService in project flink by apache.
From class AsyncSinkWriterTest, method testThatIntermittentlyFailingEntriesShouldBeFlushedWithMainBatchInTimeBasedFlush.
@Test
public void testThatIntermittentlyFailingEntriesShouldBeFlushedWithMainBatchInTimeBasedFlush() throws Exception {
    AsyncSinkWriterImpl sink =
            new AsyncSinkWriterImplBuilder()
                    .context(sinkInitContext)
                    .maxBatchSizeInBytes(10_000)
                    .maxTimeInBufferMS(100)
                    .maxRecordSizeInBytes(10_000)
                    .simulateFailures(true)
                    .build();
    TestProcessingTimeService tpts = sinkInitContext.getTestProcessingTimeService();

    tpts.setCurrentTime(0L);
    sink.write("1");
    sink.write("2");
    sink.write("225");

    // The first time-based flush at t = 100 delivers "1" and "2"; "225" fails on its
    // first attempt (simulateFailures) and is re-buffered.
    tpts.setCurrentTime(100L);
    assertEquals(2, res.size());

    sink.write("3");
    sink.write("4");
    tpts.setCurrentTime(199L);
    assertEquals(2, res.size());

    // The retried entry is delivered together with the main batch on the next time-based flush.
    tpts.setCurrentTime(200L);
    assertEquals(5, res.size());
}
Use of org.apache.flink.streaming.runtime.tasks.TestProcessingTimeService in project flink by apache.
From class AsyncSinkWriterThrottlingTest, method testSinkThroughputShouldThrottleToHalfBatchSize.
@Test
public void testSinkThroughputShouldThrottleToHalfBatchSize() throws Exception {
    int maxBatchSize = 32;
    int maxInFlightRequest = 10;
    int numberOfBatchesToSend = 1000;
    Queue<String> testRequests = getTestRequestsBuffer();
    TestSinkInitContext context = new TestSinkInitContext();
    TestProcessingTimeService tpts = context.getTestProcessingTimeService();
    ThrottlingWriter writer =
            new ThrottlingWriter((elem, ctx) -> Long.valueOf(elem), context, maxBatchSize, maxInFlightRequest);

    long currentTime = 0L;
    tpts.setCurrentTime(currentTime);
    // numberOfBatchesToSend should be high enough to overcome the initial transient state.
    for (int i = 0; i < numberOfBatchesToSend; i++) {
        removeBatchAndSend(writer, testRequests, maxBatchSize);
        tpts.setCurrentTime(currentTime + 50);
        currentTime += 50L;
    }

    // The throttling limit should converge to maxBatchSize / 2. Worst-case margins: a
    // scale-down step may take (maxBatchSize / 2 + 1) down to (maxBatchSize / 4), and a
    // scale-up step may take (maxBatchSize / 2) up to (maxBatchSize / 2 + 10).
    Assertions.assertThat(writer.getInflightMessagesLimit()).isGreaterThanOrEqualTo(maxBatchSize / 4);
    Assertions.assertThat(writer.getInflightMessagesLimit()).isLessThanOrEqualTo(maxBatchSize / 2 + 10);
}
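The asserted bounds can be worked out directly from the comment in the test; the variable names below are made up for the sketch, with maxBatchSize = 32 as configured above.

int maxBatchSize = 32;
int target = maxBatchSize / 2;          // 16 — the limit the throttling should converge to
int lowerBound = maxBatchSize / 4;      // 8  — worst-case undershoot after a scale-down step
int upperBound = maxBatchSize / 2 + 10; // 26 — worst-case overshoot after a scale-up step
// getInflightMessagesLimit() is asserted to land in [lowerBound, upperBound].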