Search in sources :

Example 1 with ConsumerMetrics

Use of org.apache.kafka.clients.consumer.internals.ConsumerMetrics in the project apache-kafka-on-k8s by banzaicloud.

From the class KafkaConsumerTest, method newConsumer.

/**
 * Builds a {@link KafkaConsumer} wired against the supplied mock client and metadata,
 * bypassing ConsumerConfig so each collaborator can be constructed explicitly.
 * Timeout fields (sessionTimeoutMs, heartbeatIntervalMs, autoCommitIntervalMs) are
 * taken from the enclosing test class.
 *
 * @param time              clock used by all networking components
 * @param client            mock network client driving request/response behavior
 * @param metadata          cluster metadata shared by coordinator and fetcher
 * @param assignor          the single partition assignor to register
 * @param resetStrategy     offset reset policy for the subscription state
 * @param autoCommitEnabled whether the coordinator auto-commits offsets
 * @return a fully wired consumer suitable for unit tests
 */
private KafkaConsumer<String, String> newConsumer(Time time, KafkaClient client, Metadata metadata, PartitionAssignor assignor, OffsetResetStrategy resetStrategy, boolean autoCommitEnabled) {
    // Fixed identity and retry/timeout settings for the mock consumer.
    final String clientId = "mock-consumer";
    final String groupId = "mock-group";
    final String metricGroupPrefix = "consumer";
    final long retryBackoffMs = 100;
    final long requestTimeoutMs = 30000;
    final int rebalanceTimeoutMs = 60000;
    final boolean excludeInternalTopics = true;
    // Fetch tuning: effectively unbounded sizes so tests are not throttled by fetch limits.
    final int minBytes = 1;
    final int maxBytes = Integer.MAX_VALUE;
    final int maxWaitMs = 500;
    final int fetchSize = 1024 * 1024;
    final int maxPollRecords = Integer.MAX_VALUE;
    final boolean checkCrcs = true;

    Deserializer<String> keyDeserializer = new StringDeserializer();
    Deserializer<String> valueDeserializer = new StringDeserializer();
    List<PartitionAssignor> assignors = singletonList(assignor);
    // No interceptors are registered; an empty chain keeps the pipeline a no-op.
    ConsumerInterceptors<String, String> interceptors = new ConsumerInterceptors<>(Collections.<ConsumerInterceptor<String, String>>emptyList());

    Metrics metrics = new Metrics();
    ConsumerMetrics consumerMetrics = new ConsumerMetrics(metricGroupPrefix);
    SubscriptionState subscriptions = new SubscriptionState(resetStrategy);
    LogContext logContext = new LogContext();

    // Wire the network layer, then the coordinator and fetcher on top of it.
    ConsumerNetworkClient networkClient = new ConsumerNetworkClient(logContext, client, metadata, time, retryBackoffMs, requestTimeoutMs, heartbeatIntervalMs);
    ConsumerCoordinator coordinator = new ConsumerCoordinator(logContext, networkClient, groupId, rebalanceTimeoutMs, sessionTimeoutMs, heartbeatIntervalMs, assignors, metadata, subscriptions, metrics, metricGroupPrefix, time, retryBackoffMs, autoCommitEnabled, autoCommitIntervalMs, interceptors, excludeInternalTopics, true);
    Fetcher<String, String> fetcher = new Fetcher<>(logContext, networkClient, minBytes, maxBytes, maxWaitMs, fetchSize, maxPollRecords, checkCrcs, keyDeserializer, valueDeserializer, metadata, subscriptions, metrics, consumerMetrics.fetcherMetrics, time, retryBackoffMs, requestTimeoutMs, IsolationLevel.READ_UNCOMMITTED);

    return new KafkaConsumer<>(logContext, clientId, coordinator, keyDeserializer, valueDeserializer, fetcher, interceptors, time, networkClient, metrics, subscriptions, metadata, retryBackoffMs, requestTimeoutMs, assignors);
}
Also used : ConsumerInterceptors(org.apache.kafka.clients.consumer.internals.ConsumerInterceptors) StringDeserializer(org.apache.kafka.common.serialization.StringDeserializer) ConsumerCoordinator(org.apache.kafka.clients.consumer.internals.ConsumerCoordinator) LogContext(org.apache.kafka.common.utils.LogContext) ConsumerMetrics(org.apache.kafka.clients.consumer.internals.ConsumerMetrics) Metrics(org.apache.kafka.common.metrics.Metrics) ConsumerNetworkClient(org.apache.kafka.clients.consumer.internals.ConsumerNetworkClient) SubscriptionState(org.apache.kafka.clients.consumer.internals.SubscriptionState) Fetcher(org.apache.kafka.clients.consumer.internals.Fetcher) PartitionAssignor(org.apache.kafka.clients.consumer.internals.PartitionAssignor) ConsumerMetrics(org.apache.kafka.clients.consumer.internals.ConsumerMetrics)

Example 2 with ConsumerMetrics

Use of org.apache.kafka.clients.consumer.internals.ConsumerMetrics in the project kafka by apache.

From the class KafkaConsumerTest, method newConsumer.

/**
 * Builds a {@link KafkaConsumer} wired against the supplied mock client, subscription
 * state, and metadata. When {@code groupId} is null, no coordinator is created and the
 * consumer operates in group-less (manual assignment) mode. Timeout fields
 * (requestTimeoutMs, defaultApiTimeoutMs, sessionTimeoutMs, heartbeatIntervalMs,
 * autoCommitIntervalMs) are taken from the enclosing test class.
 *
 * @param time                            clock used by all networking components
 * @param client                          mock network client driving request/response behavior
 * @param subscription                    subscription state shared by coordinator and fetcher
 * @param metadata                        consumer metadata instance
 * @param assignor                        the single partition assignor to register
 * @param autoCommitEnabled               whether the coordinator auto-commits offsets
 * @param groupId                         consumer group id, or null for no coordinator
 * @param groupInstanceId                 static membership instance id, if any
 * @param valueDeserializer               optional value deserializer; defaults to StringDeserializer
 * @param throwOnStableOffsetNotSupported passed through to the coordinator
 * @return a fully wired consumer suitable for unit tests
 */
private KafkaConsumer<String, String> newConsumer(Time time, KafkaClient client, SubscriptionState subscription, ConsumerMetadata metadata, ConsumerPartitionAssignor assignor, boolean autoCommitEnabled, String groupId, Optional<String> groupInstanceId, Optional<Deserializer<String>> valueDeserializer, boolean throwOnStableOffsetNotSupported) {
    // Fixed identity and retry settings for the mock consumer.
    final String clientId = "mock-consumer";
    final String metricGroupPrefix = "consumer";
    final long retryBackoffMs = 100;
    final int rebalanceTimeoutMs = 60000;
    // Fetch tuning: effectively unbounded sizes so tests are not throttled by fetch limits.
    final int minBytes = 1;
    final int maxBytes = Integer.MAX_VALUE;
    final int maxWaitMs = 500;
    final int fetchSize = 1024 * 1024;
    final int maxPollRecords = Integer.MAX_VALUE;
    final boolean checkCrcs = true;

    Deserializer<String> keyDeserializer = new StringDeserializer();
    // Fall back to a StringDeserializer when the caller did not supply one.
    Deserializer<String> deserializer = valueDeserializer.orElse(new StringDeserializer());
    List<ConsumerPartitionAssignor> assignors = singletonList(assignor);
    // No interceptors are registered; an empty chain keeps the pipeline a no-op.
    ConsumerInterceptors<String, String> interceptors = new ConsumerInterceptors<>(Collections.emptyList());

    Metrics metrics = new Metrics(time);
    ConsumerMetrics consumerMetrics = new ConsumerMetrics(metricGroupPrefix);
    LogContext logContext = new LogContext();

    ConsumerNetworkClient networkClient = new ConsumerNetworkClient(logContext, client, metadata, time, retryBackoffMs, requestTimeoutMs, heartbeatIntervalMs);

    // Only group consumers get a coordinator; group-less consumers leave it null.
    ConsumerCoordinator coordinator = null;
    if (groupId != null) {
        GroupRebalanceConfig rebalanceConfig = new GroupRebalanceConfig(sessionTimeoutMs, rebalanceTimeoutMs, heartbeatIntervalMs, groupId, groupInstanceId, retryBackoffMs, true);
        coordinator = new ConsumerCoordinator(rebalanceConfig, logContext, networkClient, assignors, metadata, subscription, metrics, metricGroupPrefix, time, autoCommitEnabled, autoCommitIntervalMs, interceptors, throwOnStableOffsetNotSupported);
    }

    Fetcher<String, String> fetcher = new Fetcher<>(logContext, networkClient, minBytes, maxBytes, maxWaitMs, fetchSize, maxPollRecords, checkCrcs, "", keyDeserializer, deserializer, metadata, subscription, metrics, consumerMetrics.fetcherMetrics, time, retryBackoffMs, requestTimeoutMs, IsolationLevel.READ_UNCOMMITTED, new ApiVersions());

    return new KafkaConsumer<>(logContext, clientId, coordinator, keyDeserializer, deserializer, fetcher, interceptors, time, networkClient, metrics, subscription, metadata, retryBackoffMs, requestTimeoutMs, defaultApiTimeoutMs, assignors, groupId);
}
Also used : ConsumerInterceptors(org.apache.kafka.clients.consumer.internals.ConsumerInterceptors) StringDeserializer(org.apache.kafka.common.serialization.StringDeserializer) ConsumerCoordinator(org.apache.kafka.clients.consumer.internals.ConsumerCoordinator) LogContext(org.apache.kafka.common.utils.LogContext) GroupRebalanceConfig(org.apache.kafka.clients.GroupRebalanceConfig) Metrics(org.apache.kafka.common.metrics.Metrics) ConsumerMetrics(org.apache.kafka.clients.consumer.internals.ConsumerMetrics) ConsumerNetworkClient(org.apache.kafka.clients.consumer.internals.ConsumerNetworkClient) NodeApiVersions(org.apache.kafka.clients.NodeApiVersions) ApiVersions(org.apache.kafka.clients.ApiVersions) Fetcher(org.apache.kafka.clients.consumer.internals.Fetcher) ConsumerMetrics(org.apache.kafka.clients.consumer.internals.ConsumerMetrics)

Aggregations

ConsumerCoordinator (org.apache.kafka.clients.consumer.internals.ConsumerCoordinator)2 ConsumerInterceptors (org.apache.kafka.clients.consumer.internals.ConsumerInterceptors)2 ConsumerMetrics (org.apache.kafka.clients.consumer.internals.ConsumerMetrics)2 ConsumerNetworkClient (org.apache.kafka.clients.consumer.internals.ConsumerNetworkClient)2 Fetcher (org.apache.kafka.clients.consumer.internals.Fetcher)2 Metrics (org.apache.kafka.common.metrics.Metrics)2 StringDeserializer (org.apache.kafka.common.serialization.StringDeserializer)2 LogContext (org.apache.kafka.common.utils.LogContext)2 ApiVersions (org.apache.kafka.clients.ApiVersions)1 GroupRebalanceConfig (org.apache.kafka.clients.GroupRebalanceConfig)1 NodeApiVersions (org.apache.kafka.clients.NodeApiVersions)1 PartitionAssignor (org.apache.kafka.clients.consumer.internals.PartitionAssignor)1 SubscriptionState (org.apache.kafka.clients.consumer.internals.SubscriptionState)1