Example 51 with Metrics

Use of org.apache.kafka.common.metrics.Metrics in project kafka by apache.

From the class KafkaConsumerTest, method newConsumer:

private KafkaConsumer<String, String> newConsumer(Time time,
                                                  KafkaClient client,
                                                  Metadata metadata,
                                                  PartitionAssignor assignor,
                                                  int rebalanceTimeoutMs,
                                                  int sessionTimeoutMs,
                                                  int heartbeatIntervalMs,
                                                  boolean autoCommitEnabled,
                                                  int autoCommitIntervalMs) {
    // create a consumer with mocked time and mocked network
    String clientId = "mock-consumer";
    String groupId = "mock-group";
    String metricGroupPrefix = "consumer";
    long retryBackoffMs = 100;
    long requestTimeoutMs = 30000;
    boolean excludeInternalTopics = true;
    int minBytes = 1;
    int maxBytes = Integer.MAX_VALUE;
    int maxWaitMs = 500;
    int fetchSize = 1024 * 1024;
    int maxPollRecords = Integer.MAX_VALUE;
    boolean checkCrcs = true;
    Deserializer<String> keyDeserializer = new StringDeserializer();
    Deserializer<String> valueDeserializer = new StringDeserializer();
    OffsetResetStrategy autoResetStrategy = OffsetResetStrategy.EARLIEST;
    List<PartitionAssignor> assignors = Arrays.asList(assignor);
    ConsumerInterceptors<String, String> interceptors = null;
    Metrics metrics = new Metrics();
    SubscriptionState subscriptions = new SubscriptionState(autoResetStrategy);
    ConsumerNetworkClient consumerClient = new ConsumerNetworkClient(client, metadata, time,
            retryBackoffMs, requestTimeoutMs);
    ConsumerCoordinator consumerCoordinator = new ConsumerCoordinator(consumerClient, groupId,
            rebalanceTimeoutMs, sessionTimeoutMs, heartbeatIntervalMs, assignors, metadata,
            subscriptions, metrics, metricGroupPrefix, time, retryBackoffMs, autoCommitEnabled,
            autoCommitIntervalMs, interceptors, excludeInternalTopics);
    Fetcher<String, String> fetcher = new Fetcher<>(consumerClient, minBytes, maxBytes, maxWaitMs,
            fetchSize, maxPollRecords, checkCrcs, keyDeserializer, valueDeserializer, metadata,
            subscriptions, metrics, metricGroupPrefix, time, retryBackoffMs);
    return new KafkaConsumer<>(clientId, consumerCoordinator, keyDeserializer, valueDeserializer,
            fetcher, interceptors, time, consumerClient, metrics, subscriptions, metadata,
            retryBackoffMs, requestTimeoutMs);
}
Also used: StringDeserializer (org.apache.kafka.common.serialization.StringDeserializer), ConsumerCoordinator (org.apache.kafka.clients.consumer.internals.ConsumerCoordinator), Metrics (org.apache.kafka.common.metrics.Metrics), ConsumerNetworkClient (org.apache.kafka.clients.consumer.internals.ConsumerNetworkClient), SubscriptionState (org.apache.kafka.clients.consumer.internals.SubscriptionState), Fetcher (org.apache.kafka.clients.consumer.internals.Fetcher), PartitionAssignor (org.apache.kafka.clients.consumer.internals.PartitionAssignor)
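
A test would typically drive this helper with a mocked clock and network. The sketch below is illustrative only: the Metadata and MockClient constructor signatures vary across Kafka versions, RoundRobinAssignor is just one concrete PartitionAssignor, and the timeout values are assumptions rather than part of the example above.

Time time = new MockTime();
Cluster cluster = TestUtils.singletonCluster("topic", 1);
// assumed 0.10.x-era constructor: Metadata(refreshBackoffMs, metadataExpireMs)
Metadata metadata = new Metadata(0, Long.MAX_VALUE);
metadata.update(cluster, Collections.<String>emptySet(), time.milliseconds());
MockClient client = new MockClient(time, metadata);
client.setNode(cluster.nodes().get(0));
KafkaConsumer<String, String> consumer = newConsumer(time, client, metadata, new RoundRobinAssignor(),
        // rebalanceTimeoutMs, sessionTimeoutMs, heartbeatIntervalMs (assumed values)
        60000, 30000, 3000,
        // autoCommitEnabled, autoCommitIntervalMs (assumed values)
        true, 1000);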

Example 52 with Metrics

Use of org.apache.kafka.common.metrics.Metrics in project kafka by apache.

From the class SenderTest, method setup:

@Before
public void setup() {
    Map<String, String> metricTags = new LinkedHashMap<>();
    metricTags.put("client-id", CLIENT_ID);
    MetricConfig metricConfig = new MetricConfig().tags(metricTags);
    metrics = new Metrics(metricConfig, time);
    accumulator = new RecordAccumulator(batchSize, 1024 * 1024, CompressionType.NONE, 0L, 0L, metrics, time);
    sender = new Sender(client, metadata, this.accumulator, true, MAX_REQUEST_SIZE, ACKS_ALL, MAX_RETRIES, metrics, time, REQUEST_TIMEOUT);
    metadata.update(cluster, Collections.<String>emptySet(), time.milliseconds());
}
Also used: MetricConfig (org.apache.kafka.common.metrics.MetricConfig), Metrics (org.apache.kafka.common.metrics.Metrics), LinkedHashMap (java.util.LinkedHashMap), Before (org.junit.Before)
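
The client-id tag configured in setup() is inherited by every sensor registered on that Metrics instance, which is how all of the Sender's metrics end up labelled with the client id. A minimal illustration using the same Metrics(MetricConfig, Time) constructor; the sensor and metric names below are invented for the example:

Map<String, String> tags = new LinkedHashMap<>();
tags.put("client-id", "my-client");
Metrics m = new Metrics(new MetricConfig().tags(tags), new MockTime());
// hypothetical sensor; every metric it registers carries {client-id=my-client}
Sensor sensor = m.sensor("example-sensor");
sensor.add(m.metricName("example-avg", "example-group", "an example average"), new Avg());
sensor.record(1024.0);

Uses: Sensor (org.apache.kafka.common.metrics.Sensor), Avg (org.apache.kafka.common.metrics.stats.Avg), MockTime (org.apache.kafka.common.utils.MockTime).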

Example 53 with Metrics

Use of org.apache.kafka.common.metrics.Metrics in project kafka by apache.

From the class SenderTest, method testSendInOrder:

@Test
public void testSendInOrder() throws Exception {
    int maxRetries = 1;
    Metrics m = new Metrics();
    try {
        Sender sender = new Sender(client, metadata, this.accumulator, true, MAX_REQUEST_SIZE, ACKS_ALL, maxRetries, m, time, REQUEST_TIMEOUT);
        // Create a two-broker cluster, with partition 0 on broker 0 and partition 1 on broker 1
        Cluster cluster1 = TestUtils.clusterWith(2, "test", 2);
        metadata.update(cluster1, Collections.<String>emptySet(), time.milliseconds());
        // Send the first message.
        TopicPartition tp2 = new TopicPartition("test", 1);
        accumulator.append(tp2, 0L, "key1".getBytes(), "value1".getBytes(), null, MAX_BLOCK_TIMEOUT);
        // connect
        sender.run(time.milliseconds());
        // send produce request
        sender.run(time.milliseconds());
        String id = client.requests().peek().destination();
        assertEquals(ApiKeys.PRODUCE, client.requests().peek().requestBuilder().apiKey());
        Node node = new Node(Integer.parseInt(id), "localhost", 0);
        assertEquals(1, client.inFlightRequestCount());
        assertTrue(client.hasInFlightRequests());
        assertTrue("Client ready status should be true", client.isReady(node, 0L));
        time.sleep(900);
        // Now send another message to tp2
        accumulator.append(tp2, 0L, "key2".getBytes(), "value2".getBytes(), null, MAX_BLOCK_TIMEOUT);
        // Update metadata before the sender receives the response from broker 1; partition 1 (tp2) now moves to broker 0
        Cluster cluster2 = TestUtils.singletonCluster("test", 2);
        metadata.update(cluster2, Collections.<String>emptySet(), time.milliseconds());
        // The sender must not send the second message to node 0 while the first is still in flight, preserving per-partition order.
        sender.run(time.milliseconds());
        assertEquals(1, client.inFlightRequestCount());
        assertTrue(client.hasInFlightRequests());
    } finally {
        m.close();
    }
}
Also used: Metrics (org.apache.kafka.common.metrics.Metrics), TopicPartition (org.apache.kafka.common.TopicPartition), Node (org.apache.kafka.common.Node), Cluster (org.apache.kafka.common.Cluster), Test (org.junit.Test)
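
The test ends once it has shown that the second batch stays queued. The hypothetical continuation below, reusing the private produceResponse helper that appears in the next example, suggests how that batch would drain once the in-flight request completes; none of this is in the original test, and the exact number of run() calls needed is an assumption.

// Hypothetical: complete the first request so tp2 is unmuted...
client.respond(produceResponse(tp2, 0L, Errors.NONE, 0));
sender.run(time.milliseconds()); // receive the response for the first batch
sender.run(time.milliseconds()); // ...and the second batch may now go to node 0
assertEquals(1, client.inFlightRequestCount());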

Example 54 with Metrics

Use of org.apache.kafka.common.metrics.Metrics in project kafka by apache.

From the class SenderTest, method testRetries:

@Test
public void testRetries() throws Exception {
    // create a sender with retries = 1
    int maxRetries = 1;
    Metrics m = new Metrics();
    try {
        Sender sender = new Sender(client, metadata, this.accumulator, false, MAX_REQUEST_SIZE, ACKS_ALL, maxRetries, m, time, REQUEST_TIMEOUT);
        // do a successful retry
        Future<RecordMetadata> future = accumulator.append(tp, 0L, "key".getBytes(), "value".getBytes(), null, MAX_BLOCK_TIMEOUT).future;
        // connect
        sender.run(time.milliseconds());
        // send produce request
        sender.run(time.milliseconds());
        String id = client.requests().peek().destination();
        Node node = new Node(Integer.parseInt(id), "localhost", 0);
        assertEquals(1, client.inFlightRequestCount());
        assertTrue(client.hasInFlightRequests());
        assertTrue("Client ready status should be true", client.isReady(node, 0L));
        client.disconnect(id);
        assertEquals(0, client.inFlightRequestCount());
        assertFalse(client.hasInFlightRequests());
        assertFalse("Client ready status should be false", client.isReady(node, 0L));
        // receive error
        sender.run(time.milliseconds());
        // reconnect
        sender.run(time.milliseconds());
        // resend
        sender.run(time.milliseconds());
        assertEquals(1, client.inFlightRequestCount());
        assertTrue(client.hasInFlightRequests());
        long offset = 0;
        client.respond(produceResponse(tp, offset, Errors.NONE, 0));
        sender.run(time.milliseconds());
        assertTrue("Request should have retried and completed", future.isDone());
        assertEquals(offset, future.get().offset());
        // do an unsuccessful retry
        future = accumulator.append(tp, 0L, "key".getBytes(), "value".getBytes(), null, MAX_BLOCK_TIMEOUT).future;
        // send produce request
        sender.run(time.milliseconds());
        for (int i = 0; i < maxRetries + 1; i++) {
            client.disconnect(client.requests().peek().destination());
            // receive error
            sender.run(time.milliseconds());
            // reconnect
            sender.run(time.milliseconds());
            // resend
            sender.run(time.milliseconds());
        }
        sender.run(time.milliseconds());
        completedWithError(future, Errors.NETWORK_EXCEPTION);
    } finally {
        m.close();
    }
}
Also used: RecordMetadata (org.apache.kafka.clients.producer.RecordMetadata), Metrics (org.apache.kafka.common.metrics.Metrics), Node (org.apache.kafka.common.Node), Test (org.junit.Test)
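
completedWithError is a private SenderTest helper that is not shown on this page. A plausible shape, assuming it simply checks that the future completed exceptionally with the expected error (the real implementation may differ):

private void completedWithError(Future<RecordMetadata> future, Errors error) throws Exception {
    assertTrue("Request should be completed", future.isDone());
    try {
        future.get();
        fail("Should have thrown an exception.");
    } catch (ExecutionException e) {
        // e.g. NETWORK_EXCEPTION maps to org.apache.kafka.common.errors.NetworkException
        assertEquals(error.exception().getClass(), e.getCause().getClass());
    }
}

Uses: ExecutionException (java.util.concurrent.ExecutionException).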

Example 55 with Metrics

Use of org.apache.kafka.common.metrics.Metrics in project kafka by apache.

From the class FetcherTest, method testFetchedRecordsRaisesOnSerializationErrors:

@Test
public void testFetchedRecordsRaisesOnSerializationErrors() {
    // raise an exception from somewhere in the middle of the fetch response
    // so that we can verify that our position does not advance after raising
    ByteArrayDeserializer deserializer = new ByteArrayDeserializer() {

        int i = 0;

        @Override
        public byte[] deserialize(String topic, byte[] data) {
            if (i++ == 1)
                throw new SerializationException();
            return data;
        }
    };
    Fetcher<byte[], byte[]> fetcher = createFetcher(subscriptions, new Metrics(time), deserializer, deserializer);
    subscriptions.assignFromUser(singleton(tp));
    subscriptions.seek(tp, 1);
    client.prepareResponse(matchesOffset(tp, 1), fetchResponse(this.records, Errors.NONE, 100L, 0));
    assertEquals(1, fetcher.sendFetches());
    consumerClient.poll(0);
    try {
        fetcher.fetchedRecords();
        fail("fetchedRecords should have raised");
    } catch (SerializationException e) {
        // the position should not advance since no data has been returned
        assertEquals(1, subscriptions.position(tp).longValue());
    }
}
Also used: Metrics (org.apache.kafka.common.metrics.Metrics), SerializationException (org.apache.kafka.common.errors.SerializationException), ByteArrayDeserializer (org.apache.kafka.common.serialization.ByteArrayDeserializer), Test (org.junit.Test)
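
createFetcher is likewise a private FetcherTest helper not shown here. A sketch of a plausible shape, mirroring the Fetcher constructor from Example 51; the field names (consumerClient, minBytes, and so on) are assumptions borrowed from that example:

private Fetcher<byte[], byte[]> createFetcher(SubscriptionState subscriptions,
                                              Metrics metrics,
                                              Deserializer<byte[]> keyDeserializer,
                                              Deserializer<byte[]> valueDeserializer) {
    return new Fetcher<>(consumerClient, minBytes, maxBytes, maxWaitMs, fetchSize,
            maxPollRecords, true, keyDeserializer, valueDeserializer, metadata,
            subscriptions, metrics, "consumer", time, retryBackoffMs);
}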

Aggregations

Metrics (org.apache.kafka.common.metrics.Metrics): 103
Test (org.junit.Test): 76
MockStreamsMetrics (org.apache.kafka.streams.processor.internals.MockStreamsMetrics): 41
HashMap (java.util.HashMap): 31
StreamsConfig (org.apache.kafka.streams.StreamsConfig): 28
TaskId (org.apache.kafka.streams.processor.TaskId): 27
Before (org.junit.Before): 22
MockTime (org.apache.kafka.common.utils.MockTime): 21
TopicPartition (org.apache.kafka.common.TopicPartition): 20
HashSet (java.util.HashSet): 19
StreamsMetrics (org.apache.kafka.streams.StreamsMetrics): 17
MockClientSupplier (org.apache.kafka.test.MockClientSupplier): 17
UUID (java.util.UUID): 16
PartitionAssignor (org.apache.kafka.clients.consumer.internals.PartitionAssignor): 15
Bytes (org.apache.kafka.common.utils.Bytes): 14
MockProcessorSupplier (org.apache.kafka.test.MockProcessorSupplier): 14
KStreamBuilder (org.apache.kafka.streams.kstream.KStreamBuilder): 13
SubscriptionInfo (org.apache.kafka.streams.processor.internals.assignment.SubscriptionInfo): 13
MockProcessorContext (org.apache.kafka.test.MockProcessorContext): 13
MockInternalTopicManager (org.apache.kafka.test.MockInternalTopicManager): 11