
Example 51 with ByteArrayDeserializer

Use of org.apache.kafka.common.serialization.ByteArrayDeserializer in the apache/kafka project.

From the class FetcherTest, method testReadCommittedLagMetric.

@Test
public void testReadCommittedLagMetric() {
    buildFetcher(OffsetResetStrategy.EARLIEST, new ByteArrayDeserializer(), new ByteArrayDeserializer(), Integer.MAX_VALUE, IsolationLevel.READ_COMMITTED);
    assignFromUser(singleton(tp0));
    subscriptions.seek(tp0, 0);
    MetricName maxLagMetric = metrics.metricInstance(metricsRegistry.recordsLagMax);
    Map<String, String> tags = new HashMap<>();
    tags.put("topic", tp0.topic());
    tags.put("partition", String.valueOf(tp0.partition()));
    MetricName partitionLagMetric = metrics.metricName("records-lag", metricGroup, tags);
    Map<MetricName, KafkaMetric> allMetrics = metrics.metrics();
    KafkaMetric recordsFetchLagMax = allMetrics.get(maxLagMetric);
    // recordsFetchLagMax should be initialized to NaN
    assertEquals(Double.NaN, (Double) recordsFetchLagMax.metricValue(), EPSILON);
    // recordsFetchLagMax should be lso - fetchOffset after receiving an empty FetchResponse
    fetchRecords(tidp0, MemoryRecords.EMPTY, Errors.NONE, 100L, 50L, 0);
    assertEquals(50, (Double) recordsFetchLagMax.metricValue(), EPSILON);
    KafkaMetric partitionLag = allMetrics.get(partitionLagMetric);
    assertEquals(50, (Double) partitionLag.metricValue(), EPSILON);
    // recordsFetchLagMax should be lso - offset of the last message after receiving a non-empty FetchResponse
    MemoryRecordsBuilder builder = MemoryRecords.builder(ByteBuffer.allocate(1024), CompressionType.NONE, TimestampType.CREATE_TIME, 0L);
    for (int v = 0; v < 3; v++)
        builder.appendWithOffset(v, RecordBatch.NO_TIMESTAMP, "key".getBytes(), ("value-" + v).getBytes());
    fetchRecords(tidp0, builder.build(), Errors.NONE, 200L, 150L, 0);
    assertEquals(147, (Double) recordsFetchLagMax.metricValue(), EPSILON);
    assertEquals(147, (Double) partitionLag.metricValue(), EPSILON);
    // verify de-registration of partition lag
    subscriptions.unsubscribe();
    fetcher.sendFetches();
    assertFalse(allMetrics.containsKey(partitionLagMetric));
}
Also used: MetricName (org.apache.kafka.common.MetricName), LinkedHashMap (java.util.LinkedHashMap), HashMap (java.util.HashMap), MemoryRecordsBuilder (org.apache.kafka.common.record.MemoryRecordsBuilder), KafkaMetric (org.apache.kafka.common.metrics.KafkaMetric), ByteArrayDeserializer (org.apache.kafka.common.serialization.ByteArrayDeserializer), Test (org.junit.jupiter.api.Test)
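
Outside this test harness, the same lag gauges can be read from a live consumer via KafkaConsumer#metrics(). The sketch below is illustrative only: the broker address and the LagMetricProbe class name are placeholders, and no broker connection is needed just to construct the consumer and inspect its registered metrics.

import java.util.Map;
import java.util.Properties;
import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.common.Metric;
import org.apache.kafka.common.MetricName;
import org.apache.kafka.common.serialization.ByteArrayDeserializer;

public class LagMetricProbe {
    public static void main(String[] args) {
        Properties props = new Properties();
        props.setProperty(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092"); // placeholder address
        try (KafkaConsumer<byte[], byte[]> consumer =
                new KafkaConsumer<>(props, new ByteArrayDeserializer(), new ByteArrayDeserializer())) {
            // The fetch-manager metrics include records-lag-max, the gauge asserted above;
            // as in the test, it reads NaN until the consumer has actually fetched.
            for (Map.Entry<MetricName, ? extends Metric> entry : consumer.metrics().entrySet()) {
                if (entry.getKey().name().equals("records-lag-max"))
                    System.out.println(entry.getKey().group() + ": " + entry.getValue().metricValue());
            }
        }
    }
}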

Example 52 with ByteArrayDeserializer

Use of org.apache.kafka.common.serialization.ByteArrayDeserializer in the apache/kafka project.

From the class FetcherTest, method testFetcherMetricsTemplates.

@Test
public void testFetcherMetricsTemplates() {
    Map<String, String> clientTags = Collections.singletonMap("client-id", "clientA");
    buildFetcher(new MetricConfig().tags(clientTags), OffsetResetStrategy.EARLIEST, new ByteArrayDeserializer(), new ByteArrayDeserializer(), Integer.MAX_VALUE, IsolationLevel.READ_UNCOMMITTED);
    // Fetch from topic to generate topic metrics
    assignFromUser(singleton(tp0));
    subscriptions.seek(tp0, 0);
    assertEquals(1, fetcher.sendFetches());
    client.prepareResponse(fullFetchResponse(tidp0, this.records, Errors.NONE, 100L, 0));
    consumerClient.poll(time.timer(0));
    assertTrue(fetcher.hasCompletedFetches());
    Map<TopicPartition, List<ConsumerRecord<byte[], byte[]>>> partitionRecords = fetchedRecords();
    assertTrue(partitionRecords.containsKey(tp0));
    // Create throttle metrics
    Fetcher.throttleTimeSensor(metrics, metricsRegistry);
    // Verify that all metrics except metrics-count have registered templates
    Set<MetricNameTemplate> allMetrics = new HashSet<>();
    for (MetricName n : metrics.metrics().keySet()) {
        String name = n.name().replaceAll(tp0.toString(), "{topic}-{partition}");
        if (!n.group().equals("kafka-metrics-count"))
            allMetrics.add(new MetricNameTemplate(name, n.group(), "", n.tags().keySet()));
    }
    TestUtils.checkEquals(allMetrics, new HashSet<>(metricsRegistry.getAllTemplates()), "metrics", "templates");
}
Also used: MetricConfig (org.apache.kafka.common.metrics.MetricConfig), MetricName (org.apache.kafka.common.MetricName), TopicPartition (org.apache.kafka.common.TopicPartition), Collections.singletonList (java.util.Collections.singletonList), Arrays.asList (java.util.Arrays.asList), ArrayList (java.util.ArrayList), Collections.emptyList (java.util.Collections.emptyList), List (java.util.List), ByteArrayDeserializer (org.apache.kafka.common.serialization.ByteArrayDeserializer), MetricNameTemplate (org.apache.kafka.common.MetricNameTemplate), HashSet (java.util.HashSet), Test (org.junit.jupiter.api.Test)
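
For reference, the template mechanism this test checks can be exercised directly against a Metrics registry. A minimal sketch using only the public metrics API; the TemplateSketch class, the "latency-avg" metric name, and the "example-group" group name are made up for illustration:

import java.util.Collections;
import org.apache.kafka.common.MetricName;
import org.apache.kafka.common.MetricNameTemplate;
import org.apache.kafka.common.metrics.MetricConfig;
import org.apache.kafka.common.metrics.Metrics;
import org.apache.kafka.common.metrics.stats.Avg;

public class TemplateSketch {
    public static void main(String[] args) {
        // Client-level tags declared here are stamped onto every metric from this registry,
        // just as the clientTags map is in the test above.
        Metrics metrics = new Metrics(new MetricConfig().tags(Collections.singletonMap("client-id", "clientA")));
        // A template names the metric, its group, its description, and the tag keys it expects;
        // metricInstance requires the template's tag keys to match the registry's runtime tags.
        MetricNameTemplate template = new MetricNameTemplate("latency-avg", "example-group", "Average latency.", "client-id");
        MetricName name = metrics.metricInstance(template);
        metrics.addMetric(name, new Avg());
        System.out.println(metrics.metrics().containsKey(name)); // true
        metrics.close();
    }
}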

Example 53 with ByteArrayDeserializer

Use of org.apache.kafka.common.serialization.ByteArrayDeserializer in the apache/kafka project.

From the class FetcherTest, method testSeekBeforeException.

@Test
public void testSeekBeforeException() {
    buildFetcher(OffsetResetStrategy.NONE, new ByteArrayDeserializer(), new ByteArrayDeserializer(), 2, IsolationLevel.READ_UNCOMMITTED);
    assignFromUser(mkSet(tp0));
    subscriptions.seek(tp0, 1);
    assertEquals(1, fetcher.sendFetches());
    Map<TopicIdPartition, FetchResponseData.PartitionData> partitions = new HashMap<>();
    partitions.put(tidp0, new FetchResponseData.PartitionData().setPartitionIndex(tp0.partition()).setHighWatermark(100).setRecords(records));
    // The first fetch returns this.records for tp0; with a max poll of 2 records, the third stays buffered
    client.prepareResponse(fullFetchResponse(tidp0, this.records, Errors.NONE, 100L, 0));
    consumerClient.poll(time.timer(0));
    assertEquals(2, fetchedRecords().get(tp0).size());
    subscriptions.assignFromUser(mkSet(tp0, tp1));
    subscriptions.seekUnvalidated(tp1, new SubscriptionState.FetchPosition(1, Optional.empty(), metadata.currentLeader(tp1)));
    assertEquals(1, fetcher.sendFetches());
    // The second fetch fails tp1 with OFFSET_OUT_OF_RANGE; tp0's remaining buffered record is still returned
    partitions = new HashMap<>();
    partitions.put(tidp1, new FetchResponseData.PartitionData().setPartitionIndex(tp1.partition()).setErrorCode(Errors.OFFSET_OUT_OF_RANGE.code()).setHighWatermark(100));
    client.prepareResponse(FetchResponse.of(Errors.NONE, 0, INVALID_SESSION_ID, new LinkedHashMap<>(partitions)));
    consumerClient.poll(time.timer(0));
    assertEquals(1, fetchedRecords().get(tp0).size());
    subscriptions.seek(tp1, 10);
    // Should not throw OffsetOutOfRangeException after the seek
    assertEmptyFetch("Should not return records or advance position after seeking to end of topic partitions");
}
Also used: PartitionData (org.apache.kafka.common.requests.FetchRequest.PartitionData), LinkedHashMap (java.util.LinkedHashMap), HashMap (java.util.HashMap), TopicIdPartition (org.apache.kafka.common.TopicIdPartition), ByteArrayDeserializer (org.apache.kafka.common.serialization.ByteArrayDeserializer), Test (org.junit.jupiter.api.Test)
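
The recovery pattern this test verifies looks roughly like the following in application code: with auto.offset.reset=none (OffsetResetStrategy.NONE above), an out-of-range position surfaces from poll() as an OffsetOutOfRangeException, and a subsequent seek clears it. A hedged sketch; the broker address, topic name, and SeekOnOutOfRange class are placeholders:

import java.time.Duration;
import java.util.Collections;
import java.util.Properties;
import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.clients.consumer.OffsetOutOfRangeException;
import org.apache.kafka.common.TopicPartition;
import org.apache.kafka.common.serialization.ByteArrayDeserializer;

public class SeekOnOutOfRange {
    public static void main(String[] args) {
        Properties props = new Properties();
        props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092"); // placeholder address
        // With no reset policy, out-of-range positions become exceptions instead of silent resets
        props.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "none");
        TopicPartition tp = new TopicPartition("my-topic", 0); // placeholder topic
        try (KafkaConsumer<byte[], byte[]> consumer =
                new KafkaConsumer<>(props, new ByteArrayDeserializer(), new ByteArrayDeserializer())) {
            consumer.assign(Collections.singleton(tp));
            consumer.seek(tp, 1L);
            try {
                consumer.poll(Duration.ofMillis(100));
            } catch (OffsetOutOfRangeException e) {
                // A fresh seek clears the invalid position, as the test above asserts
                consumer.seek(tp, consumer.endOffsets(e.partitions()).get(tp));
            }
        }
    }
}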

Example 54 with ByteArrayDeserializer

Use of org.apache.kafka.common.serialization.ByteArrayDeserializer in the apache/kafka project.

From the class KafkaConsumerTest, method testConstructorClose.

@Test
public void testConstructorClose() {
    Properties props = new Properties();
    props.setProperty(ConsumerConfig.CLIENT_ID_CONFIG, "testConstructorClose");
    props.setProperty(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "invalid-23-8409-adsfsdj");
    props.setProperty(ConsumerConfig.METRIC_REPORTER_CLASSES_CONFIG, MockMetricsReporter.class.getName());
    final int oldInitCount = MockMetricsReporter.INIT_COUNT.get();
    final int oldCloseCount = MockMetricsReporter.CLOSE_COUNT.get();
    try {
        // The invalid bootstrap address makes construction fail fast
        new KafkaConsumer<>(props, new ByteArrayDeserializer(), new ByteArrayDeserializer());
        fail("should have caught an exception and returned");
    } catch (KafkaException e) {
        assertEquals(oldInitCount + 1, MockMetricsReporter.INIT_COUNT.get());
        assertEquals(oldCloseCount + 1, MockMetricsReporter.CLOSE_COUNT.get());
        assertEquals("Failed to construct kafka consumer", e.getMessage());
    }
}
Also used: MockMetricsReporter (org.apache.kafka.test.MockMetricsReporter), KafkaException (org.apache.kafka.common.KafkaException), Properties (java.util.Properties), ByteArrayDeserializer (org.apache.kafka.common.serialization.ByteArrayDeserializer), Test (org.junit.jupiter.api.Test)
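
In application terms, the guarantee under test is that a failed constructor closes whatever it opened before rethrowing, so a caller only needs to catch KafkaException. A minimal sketch; the ConstructorFailureSketch class name and the bogus address are placeholders:

import java.util.Properties;
import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.common.KafkaException;
import org.apache.kafka.common.serialization.ByteArrayDeserializer;

public class ConstructorFailureSketch {
    public static void main(String[] args) {
        Properties props = new Properties();
        // An unparseable bootstrap address (no port) makes the constructor throw
        props.setProperty(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "not-a-valid-address");
        try {
            new KafkaConsumer<>(props, new ByteArrayDeserializer(), new ByteArrayDeserializer());
        } catch (KafkaException e) {
            // The constructor cleaned up before rethrowing, which is exactly
            // what the INIT_COUNT/CLOSE_COUNT assertions above verify
            System.out.println(e.getMessage()); // "Failed to construct kafka consumer"
        }
    }
}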

Example 55 with ByteArrayDeserializer

Use of org.apache.kafka.common.serialization.ByteArrayDeserializer in the apache/kafka project.

From the class KafkaConsumerTest, method testConsumerJmxPrefix.

@Test
public void testConsumerJmxPrefix() throws Exception {
    Map<String, Object> config = new HashMap<>();
    config.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9999");
    config.put(ConsumerConfig.SEND_BUFFER_CONFIG, Selectable.USE_DEFAULT_BUFFER_SIZE);
    config.put(ConsumerConfig.RECEIVE_BUFFER_CONFIG, Selectable.USE_DEFAULT_BUFFER_SIZE);
    config.put("client.id", "client-1");
    KafkaConsumer<byte[], byte[]> consumer = new KafkaConsumer<>(config, new ByteArrayDeserializer(), new ByteArrayDeserializer());
    MBeanServer server = ManagementFactory.getPlatformMBeanServer();
    MetricName testMetricName = consumer.metrics.metricName("test-metric", "grp1", "test metric");
    consumer.metrics.addMetric(testMetricName, new Avg());
    // Consumer metric groups are registered under the kafka.consumer JMX domain
    assertNotNull(server.getObjectInstance(new ObjectName("kafka.consumer:type=grp1,client-id=client-1")));
    consumer.close();
}
Also used: MetricName (org.apache.kafka.common.MetricName), Avg (org.apache.kafka.common.metrics.stats.Avg), LinkedHashMap (java.util.LinkedHashMap), HashMap (java.util.HashMap), ByteArrayDeserializer (org.apache.kafka.common.serialization.ByteArrayDeserializer), MBeanServer (javax.management.MBeanServer), ObjectName (javax.management.ObjectName), Test (org.junit.jupiter.api.Test)
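
The kafka.consumer JMX domain asserted above can be browsed the same way from application code, assuming the default JMX metrics reporter is enabled. A sketch; the JmxPrefixSketch class name and addresses are placeholders:

import java.lang.management.ManagementFactory;
import java.util.HashMap;
import java.util.Map;
import javax.management.MBeanServer;
import javax.management.ObjectName;
import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.common.serialization.ByteArrayDeserializer;

public class JmxPrefixSketch {
    public static void main(String[] args) throws Exception {
        Map<String, Object> config = new HashMap<>();
        config.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9999"); // placeholder address
        config.put(ConsumerConfig.CLIENT_ID_CONFIG, "client-1");
        try (KafkaConsumer<byte[], byte[]> consumer =
                new KafkaConsumer<>(config, new ByteArrayDeserializer(), new ByteArrayDeserializer())) {
            MBeanServer server = ManagementFactory.getPlatformMBeanServer();
            // Every consumer metric group is exported under the kafka.consumer domain
            for (ObjectName name : server.queryNames(new ObjectName("kafka.consumer:*"), null))
                System.out.println(name);
        }
    }
}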

Aggregations

ByteArrayDeserializer (org.apache.kafka.common.serialization.ByteArrayDeserializer): 59
TopicPartition (org.apache.kafka.common.TopicPartition): 24
ArrayList (java.util.ArrayList): 22
Test (org.junit.Test): 22
Test (org.junit.jupiter.api.Test): 22
List (java.util.List): 17
KafkaConsumer (org.apache.kafka.clients.consumer.KafkaConsumer): 17
HashMap (java.util.HashMap): 16
ByteBuffer (java.nio.ByteBuffer): 14
LinkedHashMap (java.util.LinkedHashMap): 14
MemoryRecords (org.apache.kafka.common.record.MemoryRecords): 14
SimpleRecord (org.apache.kafka.common.record.SimpleRecord): 14
HashSet (java.util.HashSet): 10
Properties (java.util.Properties): 10
Metrics (org.apache.kafka.common.metrics.Metrics): 10
Arrays.asList (java.util.Arrays.asList): 9
Collections.emptyList (java.util.Collections.emptyList): 9
Collections.singletonList (java.util.Collections.singletonList): 9
Map (java.util.Map): 9
ConsumerRebalanceListener (org.apache.kafka.clients.consumer.ConsumerRebalanceListener): 7
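
As a closing note on the class these examples share: ByteArrayDeserializer is the identity deserializer, returning the record payload unchanged, which is why the tests above can pair it with raw MemoryRecords. A minimal standalone sketch (PassThroughDemo is a made-up name):

import java.nio.charset.StandardCharsets;
import org.apache.kafka.common.serialization.ByteArrayDeserializer;

public class PassThroughDemo {
    public static void main(String[] args) {
        // ByteArrayDeserializer hands back the payload bytes as-is; close() is a no-op
        try (ByteArrayDeserializer deserializer = new ByteArrayDeserializer()) {
            byte[] payload = "hello".getBytes(StandardCharsets.UTF_8);
            byte[] result = deserializer.deserialize("any-topic", payload);
            System.out.println(new String(result, StandardCharsets.UTF_8)); // hello
        }
    }
}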