Use of io.confluent.ksql.rest.entity.ConsumerPartitionOffsets in project ksql by confluentinc.
From the class ListSourceExecutor, method consumerPartitionOffsets.
private static List<ConsumerPartitionOffsets> consumerPartitionOffsets(
    final TopicDescription topicDescription,
    final Map<TopicPartition, Long> topicAndStartOffsets,
    final Map<TopicPartition, Long> topicAndEndOffsets,
    final Map<TopicPartition, OffsetAndMetadata> topicAndConsumerOffsets
) {
  final List<ConsumerPartitionOffsets> consumerPartitionOffsets = new ArrayList<>();
  for (TopicPartitionInfo topicPartitionInfo : topicDescription.partitions()) {
    final TopicPartition tp =
        new TopicPartition(topicDescription.name(), topicPartitionInfo.partition());
    final Long startOffsetResultInfo = topicAndStartOffsets.get(tp);
    final Long endOffsetResultInfo = topicAndEndOffsets.get(tp);
    final OffsetAndMetadata offsetAndMetadata = topicAndConsumerOffsets.get(tp);
    consumerPartitionOffsets.add(new ConsumerPartitionOffsets(
        topicPartitionInfo.partition(),
        startOffsetResultInfo,
        endOffsetResultInfo,
        // offsetAndMetadata is null when the consumer has not yet polled from this topic-partition
        offsetAndMetadata != null ? offsetAndMetadata.offset() : 0));
  }
  return consumerPartitionOffsets;
}
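The three offset maps handed to this method are typically collected up front with the Kafka Admin client. Below is a minimal sketch of how they could be assembled, written as if it sat alongside the private method above; it is not the actual ksql code, and the topic name, consumer group id, and helper name describeOffsets are placeholders for illustration.

import java.util.Collections;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.stream.Collectors;
import org.apache.kafka.clients.admin.Admin;
import org.apache.kafka.clients.admin.OffsetSpec;
import org.apache.kafka.clients.admin.TopicDescription;
import org.apache.kafka.clients.consumer.OffsetAndMetadata;
import org.apache.kafka.common.TopicPartition;

// Sketch only: gathers the TopicDescription, start/end offsets, and committed
// consumer offsets that consumerPartitionOffsets(...) expects as input.
static List<ConsumerPartitionOffsets> describeOffsets(final Admin admin) throws Exception {
  final String topic = "my-topic"; // placeholder topic name
  final TopicDescription description =
      admin.describeTopics(Collections.singleton(topic)).all().get().get(topic);

  // One OffsetSpec query per partition, for both the earliest and the latest offset.
  final Map<TopicPartition, OffsetSpec> earliest = new HashMap<>();
  final Map<TopicPartition, OffsetSpec> latest = new HashMap<>();
  description.partitions().forEach(p -> {
    final TopicPartition tp = new TopicPartition(topic, p.partition());
    earliest.put(tp, OffsetSpec.earliest());
    latest.put(tp, OffsetSpec.latest());
  });

  final Map<TopicPartition, Long> startOffsets = admin.listOffsets(earliest).all().get()
      .entrySet().stream()
      .collect(Collectors.toMap(Map.Entry::getKey, e -> e.getValue().offset()));
  final Map<TopicPartition, Long> endOffsets = admin.listOffsets(latest).all().get()
      .entrySet().stream()
      .collect(Collectors.toMap(Map.Entry::getKey, e -> e.getValue().offset()));

  // Committed offsets for the group; partitions the group has never committed to are
  // absent, which is why the method above falls back to 0 for a null OffsetAndMetadata.
  final Map<TopicPartition, OffsetAndMetadata> consumerOffsets =
      admin.listConsumerGroupOffsets("my-consumer-group") // placeholder group id
          .partitionsToOffsetAndMetadata().get();

  return consumerPartitionOffsets(description, startOffsets, endOffsets, consumerOffsets);
}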
Use of io.confluent.ksql.rest.entity.ConsumerPartitionOffsets in project ksql by confluentinc.
From the class ConsoleTest, method shouldPrintTopicDescribeExtended.
@Test
public void shouldPrintTopicDescribeExtended() {
  // Given:
  final List<RunningQuery> readQueries = ImmutableList.of(new RunningQuery(
      "read query", ImmutableSet.of("sink1"), ImmutableSet.of("sink1 topic"),
      new QueryId("readId"), queryStatusCount, KsqlConstants.KsqlQueryType.PERSISTENT));
  final List<RunningQuery> writeQueries = ImmutableList.of(new RunningQuery(
      "write query", ImmutableSet.of("sink2"), ImmutableSet.of("sink2 topic"),
      new QueryId("writeId"), queryStatusCount, KsqlConstants.KsqlQueryType.PERSISTENT));
  final KsqlEntityList entityList = new KsqlEntityList(ImmutableList.of(new SourceDescriptionEntity(
      "e",
      new SourceDescription(
          "TestSource", Optional.empty(), readQueries, writeQueries,
          buildTestSchema(SqlTypes.STRING), DataSourceType.KTABLE.getKsqlType(),
          "2000-01-01", "stats", "errors", true, "json", "avro", "kafka-topic", 2, 1,
          "sql statement text",
          ImmutableList.of(
              new QueryOffsetSummary("consumer1", ImmutableList.of(
                  new QueryTopicOffsetSummary("kafka-topic", ImmutableList.of(
                      new ConsumerPartitionOffsets(0, 100, 900, 800),
                      new ConsumerPartitionOffsets(1, 50, 900, 900))),
                  new QueryTopicOffsetSummary("kafka-topic-2", ImmutableList.of(
                      new ConsumerPartitionOffsets(0, 0, 90, 80),
                      new ConsumerPartitionOffsets(1, 10, 90, 90))))),
              new QueryOffsetSummary("consumer2", ImmutableList.of())),
          ImmutableList.of("S1", "S2")),
      Collections.emptyList())));
  // When:
  console.printKsqlEntityList(entityList);
  // Then:
  final String output = terminal.getOutputString();
  Approvals.verify(output, approvalOptions);
}
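The four constructor arguments in this fixture mirror the first snippet above: partition, log start offset, log end offset, and the consumer's committed offset, which together describe how far the consumer is behind the end of each partition. A purely illustrative helper (the name impliedLag is hypothetical, not part of ksql):

// Hypothetical helper: the lag implied by a fixture row, e.g.
// new ConsumerPartitionOffsets(0, 100, 900, 800) implies 900 - 800 = 100 messages of lag.
static long impliedLag(final long endOffset, final long consumerOffset) {
  return Math.max(0L, endOffset - consumerOffset);
}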