Use of org.apache.kafka.common.serialization.ByteArrayDeserializer in project kafka by apache: the class FetcherTest, method testReadCommittedLagMetric.
@Test
public void testReadCommittedLagMetric() {
    buildFetcher(OffsetResetStrategy.EARLIEST, new ByteArrayDeserializer(), new ByteArrayDeserializer(),
            Integer.MAX_VALUE, IsolationLevel.READ_COMMITTED);
    assignFromUser(singleton(tp0));
    subscriptions.seek(tp0, 0);

    MetricName maxLagMetric = metrics.metricInstance(metricsRegistry.recordsLagMax);
    Map<String, String> tags = new HashMap<>();
    tags.put("topic", tp0.topic());
    tags.put("partition", String.valueOf(tp0.partition()));
    MetricName partitionLagMetric = metrics.metricName("records-lag", metricGroup, tags);

    Map<MetricName, KafkaMetric> allMetrics = metrics.metrics();
    KafkaMetric recordsFetchLagMax = allMetrics.get(maxLagMetric);

    // recordsFetchLagMax should be initialized to NaN
    assertEquals(Double.NaN, (Double) recordsFetchLagMax.metricValue(), EPSILON);

    // recordsFetchLagMax should be lso - fetchOffset after receiving an empty FetchResponse
    fetchRecords(tidp0, MemoryRecords.EMPTY, Errors.NONE, 100L, 50L, 0);
    assertEquals(50, (Double) recordsFetchLagMax.metricValue(), EPSILON);

    KafkaMetric partitionLag = allMetrics.get(partitionLagMetric);
    assertEquals(50, (Double) partitionLag.metricValue(), EPSILON);

    // After a non-empty FetchResponse, lag should be lso minus the next fetch offset,
    // i.e. lso - (offset of the last message + 1): 150 - 3 = 147
    MemoryRecordsBuilder builder = MemoryRecords.builder(ByteBuffer.allocate(1024), CompressionType.NONE,
            TimestampType.CREATE_TIME, 0L);
    for (int v = 0; v < 3; v++)
        builder.appendWithOffset(v, RecordBatch.NO_TIMESTAMP, "key".getBytes(), ("value-" + v).getBytes());
    fetchRecords(tidp0, builder.build(), Errors.NONE, 200L, 150L, 0);
    assertEquals(147, (Double) recordsFetchLagMax.metricValue(), EPSILON);
    assertEquals(147, (Double) partitionLag.metricValue(), EPSILON);

    // verify de-registration of the partition lag metric on unsubscribe
    subscriptions.unsubscribe();
    fetcher.sendFetches();
    assertFalse(allMetrics.containsKey(partitionLagMetric));
}
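The arithmetic behind the assertions: under READ_COMMITTED, lag is measured against the last stable offset (LSO) rather than the high watermark, and equals the LSO minus the next fetch position. The empty response carries LSO 50 at fetch offset 0, so the lag is 50; after consuming offsets 0 through 2 the next position is 3, and with LSO 150 the lag is 147. Outside the test harness the same values are reachable through a consumer's public metrics() view. A minimal sketch, assuming the "consumer-fetch-manager-metrics" group used by current Kafka clients (the printRecordsLag helper name is ours):

import java.util.Map;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.common.Metric;
import org.apache.kafka.common.MetricName;

// Hypothetical helper: prints per-partition records-lag for a live consumer.
// Assumes the "consumer-fetch-manager-metrics" group of current Kafka clients.
static void printRecordsLag(KafkaConsumer<byte[], byte[]> consumer) {
    for (Map.Entry<MetricName, ? extends Metric> entry : consumer.metrics().entrySet()) {
        MetricName n = entry.getKey();
        if ("records-lag".equals(n.name()) && "consumer-fetch-manager-metrics".equals(n.group())) {
            System.out.printf("topic=%s partition=%s lag=%s%n",
                    n.tags().get("topic"), n.tags().get("partition"),
                    entry.getValue().metricValue());
        }
    }
}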
Use of org.apache.kafka.common.serialization.ByteArrayDeserializer in project kafka by apache: the class FetcherTest, method testFetcherMetricsTemplates.
@Test
public void testFetcherMetricsTemplates() {
    Map<String, String> clientTags = Collections.singletonMap("client-id", "clientA");
    buildFetcher(new MetricConfig().tags(clientTags), OffsetResetStrategy.EARLIEST,
            new ByteArrayDeserializer(), new ByteArrayDeserializer(),
            Integer.MAX_VALUE, IsolationLevel.READ_UNCOMMITTED);

    // Fetch from topic to generate topic metrics
    assignFromUser(singleton(tp0));
    subscriptions.seek(tp0, 0);
    assertEquals(1, fetcher.sendFetches());
    client.prepareResponse(fullFetchResponse(tidp0, this.records, Errors.NONE, 100L, 0));
    consumerClient.poll(time.timer(0));
    assertTrue(fetcher.hasCompletedFetches());
    Map<TopicPartition, List<ConsumerRecord<byte[], byte[]>>> partitionRecords = fetchedRecords();
    assertTrue(partitionRecords.containsKey(tp0));

    // Create throttle metrics
    Fetcher.throttleTimeSensor(metrics, metricsRegistry);

    // Verify that all metrics except metrics-count have registered templates
    Set<MetricNameTemplate> allMetrics = new HashSet<>();
    for (MetricName n : metrics.metrics().keySet()) {
        String name = n.name().replaceAll(tp0.toString(), "{topic}-{partition}");
        if (!n.group().equals("kafka-metrics-count"))
            allMetrics.add(new MetricNameTemplate(name, n.group(), "", n.tags().keySet()));
    }
    TestUtils.checkEquals(allMetrics, new HashSet<>(metricsRegistry.getAllTemplates()), "metrics", "templates");
}
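For context, a MetricNameTemplate pairs a metric's name, group, and description with the tag keys it is expected to carry, which is what lets the test compare the registered metrics against metricsRegistry.getAllTemplates(). A minimal sketch of the pattern; the description and tag values below are illustrative, not taken from the registry:

import org.apache.kafka.common.MetricName;
import org.apache.kafka.common.MetricNameTemplate;
import org.apache.kafka.common.metrics.Metrics;

Metrics metrics = new Metrics();
// A template fixes the metric's name, group, description, and tag keys up front...
MetricNameTemplate template = new MetricNameTemplate(
        "records-lag", "consumer-fetch-manager-metrics",
        "Illustrative description", "topic", "partition");
// ...and metricInstance fills in concrete tag values, yielding a usable MetricName.
MetricName name = metrics.metricInstance(template, "topic", "my-topic", "partition", "0");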
Use of org.apache.kafka.common.serialization.ByteArrayDeserializer in project kafka by apache: the class FetcherTest, method testSeekBeforeException.
@Test
public void testSeekBeforeException() {
    buildFetcher(OffsetResetStrategy.NONE, new ByteArrayDeserializer(), new ByteArrayDeserializer(),
            2, IsolationLevel.READ_UNCOMMITTED);
    assignFromUser(mkSet(tp0));
    subscriptions.seek(tp0, 1);
    assertEquals(1, fetcher.sendFetches());

    Map<TopicIdPartition, FetchResponseData.PartitionData> partitions = new HashMap<>();
    partitions.put(tidp0, new FetchResponseData.PartitionData()
            .setPartitionIndex(tp0.partition())
            .setHighWatermark(100)
            .setRecords(records));
    client.prepareResponse(fullFetchResponse(tidp0, this.records, Errors.NONE, 100L, 0));
    consumerClient.poll(time.timer(0));
    assertEquals(2, fetchedRecords().get(tp0).size());

    subscriptions.assignFromUser(mkSet(tp0, tp1));
    subscriptions.seekUnvalidated(tp1, new SubscriptionState.FetchPosition(1, Optional.empty(), metadata.currentLeader(tp1)));

    assertEquals(1, fetcher.sendFetches());
    partitions = new HashMap<>();
    partitions.put(tidp1, new FetchResponseData.PartitionData()
            .setPartitionIndex(tp1.partition())
            .setErrorCode(Errors.OFFSET_OUT_OF_RANGE.code())
            .setHighWatermark(100));
    client.prepareResponse(FetchResponse.of(Errors.NONE, 0, INVALID_SESSION_ID, new LinkedHashMap<>(partitions)));
    consumerClient.poll(time.timer(0));
    assertEquals(1, fetchedRecords().get(tp0).size());

    // Seeking tp1 to a valid position discards the pending OFFSET_OUT_OF_RANGE error,
    // so the next fetch should not throw OffsetOutOfRangeException
    subscriptions.seek(tp1, 10);
    assertEmptyFetch("Should not return records or advance position after seeking to end of topic partitions");
}
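The production analogue of this behavior: with auto.offset.reset=none, an out-of-range fetch position surfaces as an OffsetOutOfRangeException from poll(), unless a seek() to a valid offset clears the pending error first, as it does above. A hedged sketch of handling the exception reactively; the recovery offset (here 0L) is application-specific and `consumer` is assumed to be an already-configured KafkaConsumer:

import java.time.Duration;
import org.apache.kafka.clients.consumer.OffsetOutOfRangeException;
import org.apache.kafka.common.TopicPartition;

try {
    consumer.poll(Duration.ofMillis(100));
} catch (OffsetOutOfRangeException e) {
    // With auto.offset.reset=none the client does not reset the position for us;
    // seek each affected partition to an application-chosen offset instead.
    for (TopicPartition tp : e.partitions()) {
        consumer.seek(tp, 0L);
    }
}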
Use of org.apache.kafka.common.serialization.ByteArrayDeserializer in project kafka by apache: the class KafkaConsumerTest, method testConstructorClose.
@Test
public void testConstructorClose() {
    Properties props = new Properties();
    props.setProperty(ConsumerConfig.CLIENT_ID_CONFIG, "testConstructorClose");
    props.setProperty(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "invalid-23-8409-adsfsdj");
    props.setProperty(ConsumerConfig.METRIC_REPORTER_CLASSES_CONFIG, MockMetricsReporter.class.getName());

    final int oldInitCount = MockMetricsReporter.INIT_COUNT.get();
    final int oldCloseCount = MockMetricsReporter.CLOSE_COUNT.get();
    try {
        new KafkaConsumer<>(props, new ByteArrayDeserializer(), new ByteArrayDeserializer());
        fail("should have caught an exception and returned");
    } catch (KafkaException e) {
        // The reporter created during the failed construction must also be closed
        assertEquals(oldInitCount + 1, MockMetricsReporter.INIT_COUNT.get());
        assertEquals(oldCloseCount + 1, MockMetricsReporter.CLOSE_COUNT.get());
        assertEquals("Failed to construct kafka consumer", e.getMessage());
    }
}
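The counting hooks used above come from the test-only MockMetricsReporter. A reporter in the same spirit is a straightforward sketch of the MetricsReporter interface; the class name here is illustrative and not part of Kafka:

import java.util.List;
import java.util.Map;
import java.util.concurrent.atomic.AtomicInteger;
import org.apache.kafka.common.metrics.KafkaMetric;
import org.apache.kafka.common.metrics.MetricsReporter;

// Counts lifecycle calls so a test can assert that a failed constructor
// still closes the reporters it instantiated.
public class CountingMetricsReporter implements MetricsReporter {
    public static final AtomicInteger INIT_COUNT = new AtomicInteger();
    public static final AtomicInteger CLOSE_COUNT = new AtomicInteger();

    @Override public void init(List<KafkaMetric> metrics) { INIT_COUNT.incrementAndGet(); }
    @Override public void metricChange(KafkaMetric metric) { }
    @Override public void metricRemoval(KafkaMetric metric) { }
    @Override public void close() { CLOSE_COUNT.incrementAndGet(); }
    @Override public void configure(Map<String, ?> configs) { }
}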
Use of org.apache.kafka.common.serialization.ByteArrayDeserializer in project kafka by apache: the class KafkaConsumerTest, method testConsumerJmxPrefix.
@Test
public void testConsumerJmxPrefix() throws Exception {
    Map<String, Object> config = new HashMap<>();
    config.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9999");
    config.put(ConsumerConfig.SEND_BUFFER_CONFIG, Selectable.USE_DEFAULT_BUFFER_SIZE);
    config.put(ConsumerConfig.RECEIVE_BUFFER_CONFIG, Selectable.USE_DEFAULT_BUFFER_SIZE);
    config.put("client.id", "client-1");
    KafkaConsumer<byte[], byte[]> consumer = new KafkaConsumer<>(config, new ByteArrayDeserializer(), new ByteArrayDeserializer());
    MBeanServer server = ManagementFactory.getPlatformMBeanServer();
    MetricName testMetricName = consumer.metrics.metricName("test-metric", "grp1", "test metric");
    consumer.metrics.addMetric(testMetricName, new Avg());
    // Consumer metrics must be registered under the "kafka.consumer" JMX domain
    assertNotNull(server.getObjectInstance(new ObjectName("kafka.consumer:type=grp1,client-id=client-1")));
    consumer.close();
}
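Because JmxReporter registers each metric group as an MBean under the kafka.consumer domain, with attributes named after the individual metrics, the value added above can also be read back through plain JMX. A minimal sketch, meant to run inside a method that declares throws Exception just as the test does:

import java.lang.management.ManagementFactory;
import javax.management.MBeanServer;
import javax.management.ObjectName;

MBeanServer server = ManagementFactory.getPlatformMBeanServer();
ObjectName name = new ObjectName("kafka.consumer:type=grp1,client-id=client-1");
// JmxReporter exposes each metric as an MBean attribute named after the metric
Object value = server.getAttribute(name, "test-metric");
System.out.println("test-metric = " + value);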