
Example 16 with ConsumerRecord

Use of org.apache.kafka.clients.consumer.ConsumerRecord in project ksql by confluentinc.

The class ConsumerCollectorTest, method shouldDisplayRateThroughput.

@Test
public void shouldDisplayRateThroughput() throws Exception {
    // Feed 100 single-record batches through the collector.
    ConsumerCollector collector = new ConsumerCollector();
    collector.configure(new Metrics(), "group", new SystemTime());
    for (int i = 0; i < 100; i++) {
        Map<TopicPartition, List<ConsumerRecord<Object, Object>>> records = ImmutableMap.of(
                new TopicPartition(TEST_TOPIC, 1),
                Arrays.asList(new ConsumerRecord<>(
                        TEST_TOPIC, 1, i, 1L, TimestampType.CREATE_TIME, 1L, 10, 10, "key", "1234567890")));
        ConsumerRecords<Object, Object> consumerRecords = new ConsumerRecords<>(records);
        collector.onConsume(consumerRecords);
    }
    Collection<TopicSensors.Stat> stats = collector.stats(TEST_TOPIC, false);
    assertNotNull(stats);
    assertThat(stats.toString(), containsString("name=consumer-messages-per-sec,"));
    assertThat(stats.toString(), containsString("total-messages, value=100.0"));
}
Also used: ConsumerRecords (org.apache.kafka.clients.consumer.ConsumerRecords), ConsumerRecord (org.apache.kafka.clients.consumer.ConsumerRecord), Metrics (org.apache.kafka.common.metrics.Metrics), TopicPartition (org.apache.kafka.common.TopicPartition), SystemTime (io.confluent.common.utils.SystemTime), Test (org.junit.Test)
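
The test never touches a broker: it fabricates a ConsumerRecords batch from a partition-to-records map and pushes it through the interceptor callback. A minimal, self-contained sketch of the same construction (topic name, keys, and values here are illustrative, not taken from the test):

import java.util.Arrays;
import java.util.Collections;
import java.util.List;
import java.util.Map;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.common.TopicPartition;

public class HandBuiltRecordsExample {
    public static void main(String[] args) {
        // A single fabricated record; the short constructor fills in defaults
        // for timestamp and serialized sizes.
        ConsumerRecord<String, String> record =
                new ConsumerRecord<>("test-topic", 0, 0L, "key", "value");

        // ConsumerRecords wraps a partition-to-records map, exactly as in the test.
        Map<TopicPartition, List<ConsumerRecord<String, String>>> byPartition =
                Collections.singletonMap(new TopicPartition("test-topic", 0),
                        Arrays.asList(record));
        ConsumerRecords<String, String> batch = new ConsumerRecords<>(byPartition);

        // The wrapper is iterable and knows its size, which is what an
        // interceptor such as ConsumerCollector consumes.
        for (ConsumerRecord<String, String> r : batch) {
            System.out.printf("%s-%d@%d: %s=%s%n",
                    r.topic(), r.partition(), r.offset(), r.key(), r.value());
        }
        System.out.println("count=" + batch.count());
    }
}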

Example 17 with ConsumerRecord

Use of org.apache.kafka.clients.consumer.ConsumerRecord in project ksql by confluentinc.

The class MetricCollectorsTest, method shouldAggregateConsumptionStatsByQuery.

@Test
public void shouldAggregateConsumptionStatsByQuery() throws Exception {
    ConsumerCollector collector1 = new ConsumerCollector();
    collector1.configure(ImmutableMap.of(ConsumerConfig.GROUP_ID_CONFIG, "group1"));
    ConsumerCollector collector2 = new ConsumerCollector();
    collector2.configure(ImmutableMap.of(ConsumerConfig.GROUP_ID_CONFIG, "group1"));
    ConsumerCollector collector3 = new ConsumerCollector();
    collector3.configure(ImmutableMap.of(ConsumerConfig.GROUP_ID_CONFIG, "group2"));
    Map<TopicPartition, List<ConsumerRecord<Object, Object>>> records = new HashMap<>();
    List<ConsumerRecord<Object, Object>> recordList = new ArrayList<>();
    for (int i = 0; i < 500; i++) {
        recordList.add(new ConsumerRecord<>(
                TEST_TOPIC, 1, 1, 1L, TimestampType.CREATE_TIME, 1L, 10, 10, "key", "1234567890"));
    }
    records.put(new TopicPartition(TEST_TOPIC, 1), recordList);
    ConsumerRecords<Object, Object> consumerRecords = new ConsumerRecords<>(records);
    collector1.onConsume(consumerRecords);
    collector2.onConsume(consumerRecords);
    collector3.onConsume(consumerRecords);
    List<Double> consumptionByQuery = new ArrayList<>(MetricCollectors.currentConsumptionRateByQuery());
    consumptionByQuery.sort(Comparator.naturalOrder());
    // Each query has a unique consumer group id. Here there are two queries and
    // three consumers, so currentConsumptionRateByQuery should return two results.
    assertEquals(2, consumptionByQuery.size());
    // As in the test above, the Kafka `Rate` measurable stat reports a tenth of
    // the true rate because not all of its samples have been filled in yet.
    assertEquals(5.0, Math.floor(consumptionByQuery.get(0)), 0.1);
    assertEquals(10.0, Math.floor(consumptionByQuery.get(1)), 0.1);
}
Also used: HashMap (java.util.HashMap), ArrayList (java.util.ArrayList), ConsumerRecords (org.apache.kafka.clients.consumer.ConsumerRecords), ConsumerRecord (org.apache.kafka.clients.consumer.ConsumerRecord), TopicPartition (org.apache.kafka.common.TopicPartition), List (java.util.List), Test (org.junit.Test)
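
The assertion rests on a simple grouping: per-consumer rates are summed by consumer group id, and each group id corresponds to one query. A stand-alone sketch of that aggregation logic (the ConsumerRate record and the sample numbers are hypothetical, not the MetricCollectors internals):

import java.util.Arrays;
import java.util.List;
import java.util.Map;
import java.util.stream.Collectors;

public class PerQueryRateExample {
    // Hypothetical stand-in for a consumer's identity and its current rate.
    record ConsumerRate(String groupId, double messagesPerSec) {}

    public static void main(String[] args) {
        List<ConsumerRate> consumers = Arrays.asList(
                new ConsumerRate("group1", 5.0),   // two consumers share group1,
                new ConsumerRate("group1", 5.0),   // i.e. one query
                new ConsumerRate("group2", 5.0));  // a second query

        // Sum rates per group id: three consumers collapse to two queries.
        Map<String, Double> rateByQuery = consumers.stream()
                .collect(Collectors.groupingBy(
                        ConsumerRate::groupId,
                        Collectors.summingDouble(ConsumerRate::messagesPerSec)));

        System.out.println(rateByQuery); // e.g. {group1=10.0, group2=5.0}
    }
}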

Example 18 with ConsumerRecord

Use of org.apache.kafka.clients.consumer.ConsumerRecord in project ksql by confluentinc.

The class MetricCollectorsTest, method shouldNotIncludeRestoreConsumersWhenComputingPerQueryStats.

@Test
public void shouldNotIncludeRestoreConsumersWhenComputingPerQueryStats() throws Exception {
    ConsumerCollector collector1 = new ConsumerCollector();
    collector1.configure(ImmutableMap.of(ConsumerConfig.GROUP_ID_CONFIG, "group1"));
    ConsumerCollector collector2 = new ConsumerCollector();
    collector2.configure(ImmutableMap.of(ConsumerConfig.GROUP_ID_CONFIG, "group1"));
    ConsumerCollector collector3 = new ConsumerCollector();
    collector3.configure(ImmutableMap.of(ConsumerConfig.GROUP_ID_CONFIG, "group2"));
    // The restore consumer has no group id, so it should not be counted in the
    // overall query stats.
    ConsumerCollector collector4 = new ConsumerCollector();
    collector4.configure(ImmutableMap.of(ConsumerConfig.CLIENT_ID_CONFIG, "restore-consumer-client"));
    Map<TopicPartition, List<ConsumerRecord<Object, Object>>> records = new HashMap<>();
    List<ConsumerRecord<Object, Object>> recordList = new ArrayList<>();
    for (int i = 0; i < 500; i++) {
        recordList.add(new ConsumerRecord<>(
                TEST_TOPIC, 1, 1, 1L, TimestampType.CREATE_TIME, 1L, 10, 10, "key", "1234567890"));
    }
    records.put(new TopicPartition(TEST_TOPIC, 1), recordList);
    ConsumerRecords<Object, Object> consumerRecords = new ConsumerRecords<>(records);
    collector1.onConsume(consumerRecords);
    collector2.onConsume(consumerRecords);
    collector3.onConsume(consumerRecords);
    collector4.onConsume(consumerRecords);
    List<Double> consumptionByQuery = new ArrayList<>(MetricCollectors.currentConsumptionRateByQuery());
    consumptionByQuery.sort(Comparator.naturalOrder());
    // Each query has a unique consumer group id. Here there are two queries and
    // three consumers with group ids; the restore consumer is excluded, so
    // currentConsumptionRateByQuery should return two results.
    assertEquals(2, consumptionByQuery.size());
    // As in the test above, the Kafka `Rate` measurable stat reports a tenth of
    // the true rate because not all of its samples have been filled in yet.
    assertEquals(5.0, Math.floor(consumptionByQuery.get(0)), 0.1);
    assertEquals(10.0, Math.floor(consumptionByQuery.get(1)), 0.1);
}
Also used: HashMap (java.util.HashMap), ArrayList (java.util.ArrayList), ConsumerRecords (org.apache.kafka.clients.consumer.ConsumerRecords), ConsumerRecord (org.apache.kafka.clients.consumer.ConsumerRecord), TopicPartition (org.apache.kafka.common.TopicPartition), List (java.util.List), Test (org.junit.Test)
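
Excluding restore consumers then amounts to one extra filter: collectors configured without a group id are dropped before the per-group sums are taken. A hedged sketch building on the previous one (again with a hypothetical ConsumerRate record, not the ksql implementation):

import java.util.Arrays;
import java.util.List;
import java.util.Map;
import java.util.Optional;
import java.util.stream.Collectors;

public class RestoreConsumerFilterExample {
    // Hypothetical stand-in: a restore consumer carries no group id.
    record ConsumerRate(Optional<String> groupId, double messagesPerSec) {}

    public static void main(String[] args) {
        List<ConsumerRate> consumers = Arrays.asList(
                new ConsumerRate(Optional.of("group1"), 5.0),
                new ConsumerRate(Optional.of("group1"), 5.0),
                new ConsumerRate(Optional.of("group2"), 5.0),
                new ConsumerRate(Optional.empty(), 5.0)); // restore consumer

        // Drop consumers without a group id, then aggregate per query as before.
        Map<String, Double> rateByQuery = consumers.stream()
                .filter(c -> c.groupId().isPresent())
                .collect(Collectors.groupingBy(
                        c -> c.groupId().get(),
                        Collectors.summingDouble(ConsumerRate::messagesPerSec)));

        System.out.println(rateByQuery); // the restore consumer contributes nothing
    }
}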

Example 19 with ConsumerRecord

Use of org.apache.kafka.clients.consumer.ConsumerRecord in project ksql by confluentinc.

The class CommandRunnerTest, method getRecordMap.

private Map<TopicPartition, List<ConsumerRecord<CommandId, Command>>> getRecordMap() {
    List<Pair<CommandId, Command>> commandList = new TestUtils().getAllPriorCommandRecords();
    List<ConsumerRecord<CommandId, Command>> recordList = new ArrayList<>();
    for (Pair<CommandId, Command> commandPair : commandList) {
        recordList.add(new ConsumerRecord<>("T", 1, 1, commandPair.getLeft(), commandPair.getRight()));
    }
    Map<TopicPartition, List<ConsumerRecord<CommandId, Command>>> recordMap = new HashMap<>();
    recordMap.put(new TopicPartition("T", 1), recordList);
    return recordMap;
}
Also used: HashMap (java.util.HashMap), ArrayList (java.util.ArrayList), ConsumerRecord (org.apache.kafka.clients.consumer.ConsumerRecord), TestUtils (io.confluent.ksql.rest.server.utils.TestUtils), TopicPartition (org.apache.kafka.common.TopicPartition), List (java.util.List), Pair (io.confluent.ksql.util.Pair)
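
A map in this shape is exactly what the ConsumerRecords constructor accepts, so the fixture can be replayed as if it came from a poll. A small usage sketch, with String keys and values standing in for CommandId and Command (those types live in the ksql test utilities and are not reproduced here):

import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.common.TopicPartition;

public class RecordMapFixtureExample {
    // Build a fixture map shaped like getRecordMap(), with Strings standing
    // in for CommandId and Command.
    static Map<TopicPartition, List<ConsumerRecord<String, String>>> getRecordMap() {
        List<ConsumerRecord<String, String>> recordList = new ArrayList<>();
        recordList.add(new ConsumerRecord<>("T", 1, 1, "create-stream", "CREATE STREAM ..."));
        recordList.add(new ConsumerRecord<>("T", 1, 2, "create-table", "CREATE TABLE ..."));
        Map<TopicPartition, List<ConsumerRecord<String, String>>> recordMap = new HashMap<>();
        recordMap.put(new TopicPartition("T", 1), recordList);
        return recordMap;
    }

    public static void main(String[] args) {
        // Replay the fixture exactly as a mocked consumer poll would return it.
        ConsumerRecords<String, String> records = new ConsumerRecords<>(getRecordMap());
        for (ConsumerRecord<String, String> r : records) {
            System.out.println(r.key() + " -> " + r.value());
        }
    }
}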

Example 20 with ConsumerRecord

Use of org.apache.kafka.clients.consumer.ConsumerRecord in project ksql by confluentinc.

The class TopicStreamWriterFormatTest, method shouldMatchJsonFormatter.

@Test
public void shouldMatchJsonFormatter() throws Exception {
    SchemaRegistryClient schemaRegistryClient = mock(SchemaRegistryClient.class);
    replay(schemaRegistryClient);
    // Test data: a minimal JSON document.
    String json = "{    \"name\": \"myrecord\"," + "    \"type\": \"record\"" + "}";
    ConsumerRecord<String, Bytes> record =
            new ConsumerRecord<>("topic", 1, 1, "key", new Bytes(json.getBytes()));
    assertTrue(TopicStreamWriter.Format.JSON.isFormat("topic", record, schemaRegistryClient));
}
Also used: Bytes (org.apache.kafka.common.utils.Bytes), ConsumerRecord (org.apache.kafka.clients.consumer.ConsumerRecord), SchemaRegistryClient (io.confluent.kafka.schemaregistry.client.SchemaRegistryClient), Test (org.junit.Test)
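
The format probe reduces to asking whether the record value parses as JSON. One common way to write such a predicate with Jackson (an illustrative stand-in, not the actual TopicStreamWriter.Format implementation):

import com.fasterxml.jackson.databind.ObjectMapper;

public class JsonSniffExample {
    private static final ObjectMapper MAPPER = new ObjectMapper();

    // Returns true if the bytes parse as a JSON document, false otherwise.
    static boolean looksLikeJson(byte[] value) {
        try {
            MAPPER.readTree(value);
            return true;
        } catch (Exception e) {
            return false;
        }
    }

    public static void main(String[] args) {
        String json = "{ \"name\": \"myrecord\", \"type\": \"record\" }";
        System.out.println(looksLikeJson(json.getBytes()));       // true
        System.out.println(looksLikeJson("not json".getBytes())); // false
    }
}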

Aggregations

ConsumerRecord (org.apache.kafka.clients.consumer.ConsumerRecord): 309
TopicPartition (org.apache.kafka.common.TopicPartition): 158
Test (org.junit.Test): 145
ArrayList (java.util.ArrayList): 120
List (java.util.List): 99
HashMap (java.util.HashMap): 97
Map (java.util.Map): 70
RecordHeaders (org.apache.kafka.common.header.internals.RecordHeaders): 61
ConsumerRecords (org.apache.kafka.clients.consumer.ConsumerRecords): 51
Test (org.junit.jupiter.api.Test): 35
PrepareForTest (org.powermock.core.classloader.annotations.PrepareForTest): 33
OffsetAndMetadata (org.apache.kafka.clients.consumer.OffsetAndMetadata): 31
LinkedHashMap (java.util.LinkedHashMap): 30
Header (org.apache.kafka.common.header.Header): 29
KafkaConsumer (org.apache.kafka.clients.consumer.KafkaConsumer): 28
RecordHeader (org.apache.kafka.common.header.internals.RecordHeader): 28
TimeUnit (java.util.concurrent.TimeUnit): 27
Set (java.util.Set): 24
Collectors (java.util.stream.Collectors): 24
ByteBuffer (java.nio.ByteBuffer): 22