Use of org.apache.kafka.clients.consumer.internals.SubscriptionState in project kafka by apache.
In class KafkaConsumerTest, method newConsumer:
private KafkaConsumer<String, String> newConsumer(Time time, KafkaClient client, Metadata metadata, PartitionAssignor assignor, int rebalanceTimeoutMs, int sessionTimeoutMs, int heartbeatIntervalMs, boolean autoCommitEnabled, int autoCommitIntervalMs) {
    // create a consumer with mocked time and mocked network
    String clientId = "mock-consumer";
    String groupId = "mock-group";
    String metricGroupPrefix = "consumer";
    long retryBackoffMs = 100;
    long requestTimeoutMs = 30000;
    boolean excludeInternalTopics = true;
    int minBytes = 1;
    int maxBytes = Integer.MAX_VALUE;
    int maxWaitMs = 500;
    int fetchSize = 1024 * 1024;
    int maxPollRecords = Integer.MAX_VALUE;
    boolean checkCrcs = true;
    Deserializer<String> keyDeserializer = new StringDeserializer();
    Deserializer<String> valueDeserializer = new StringDeserializer();
    OffsetResetStrategy autoResetStrategy = OffsetResetStrategy.EARLIEST;
    List<PartitionAssignor> assignors = Arrays.asList(assignor);
    ConsumerInterceptors<String, String> interceptors = null;
    Metrics metrics = new Metrics();
    // a single SubscriptionState instance is shared by the coordinator, the fetcher and the consumer
    SubscriptionState subscriptions = new SubscriptionState(autoResetStrategy);
    ConsumerNetworkClient consumerClient = new ConsumerNetworkClient(client, metadata, time, retryBackoffMs, requestTimeoutMs);
    ConsumerCoordinator consumerCoordinator = new ConsumerCoordinator(consumerClient, groupId, rebalanceTimeoutMs, sessionTimeoutMs, heartbeatIntervalMs, assignors, metadata, subscriptions, metrics, metricGroupPrefix, time, retryBackoffMs, autoCommitEnabled, autoCommitIntervalMs, interceptors, excludeInternalTopics);
    Fetcher<String, String> fetcher = new Fetcher<>(consumerClient, minBytes, maxBytes, maxWaitMs, fetchSize, maxPollRecords, checkCrcs, keyDeserializer, valueDeserializer, metadata, subscriptions, metrics, metricGroupPrefix, time, retryBackoffMs);
    return new KafkaConsumer<>(clientId, consumerCoordinator, keyDeserializer, valueDeserializer, fetcher, interceptors, time, consumerClient, metrics, subscriptions, metadata, retryBackoffMs, requestTimeoutMs);
}
Use of org.apache.kafka.clients.consumer.internals.SubscriptionState in project apache-kafka-on-k8s by banzaicloud.
In class KafkaConsumerTest, method newConsumer:
private KafkaConsumer<String, String> newConsumer(Time time, KafkaClient client, Metadata metadata, PartitionAssignor assignor, OffsetResetStrategy resetStrategy, boolean autoCommitEnabled) {
    // create a consumer with mocked time and mocked network;
    // sessionTimeoutMs, heartbeatIntervalMs and autoCommitIntervalMs are defined on the enclosing test class
    String clientId = "mock-consumer";
    String groupId = "mock-group";
    String metricGroupPrefix = "consumer";
    long retryBackoffMs = 100;
    long requestTimeoutMs = 30000;
    boolean excludeInternalTopics = true;
    int minBytes = 1;
    int maxBytes = Integer.MAX_VALUE;
    int maxWaitMs = 500;
    int fetchSize = 1024 * 1024;
    int maxPollRecords = Integer.MAX_VALUE;
    boolean checkCrcs = true;
    int rebalanceTimeoutMs = 60000;
    Deserializer<String> keyDeserializer = new StringDeserializer();
    Deserializer<String> valueDeserializer = new StringDeserializer();
    List<PartitionAssignor> assignors = singletonList(assignor);
    ConsumerInterceptors<String, String> interceptors = new ConsumerInterceptors<>(Collections.<ConsumerInterceptor<String, String>>emptyList());
    Metrics metrics = new Metrics();
    ConsumerMetrics metricsRegistry = new ConsumerMetrics(metricGroupPrefix);
    // the caller-supplied reset strategy is baked into the shared SubscriptionState
    SubscriptionState subscriptions = new SubscriptionState(resetStrategy);
    LogContext loggerFactory = new LogContext();
    ConsumerNetworkClient consumerClient = new ConsumerNetworkClient(loggerFactory, client, metadata, time, retryBackoffMs, requestTimeoutMs, heartbeatIntervalMs);
    ConsumerCoordinator consumerCoordinator = new ConsumerCoordinator(loggerFactory, consumerClient, groupId, rebalanceTimeoutMs, sessionTimeoutMs, heartbeatIntervalMs, assignors, metadata, subscriptions, metrics, metricGroupPrefix, time, retryBackoffMs, autoCommitEnabled, autoCommitIntervalMs, interceptors, excludeInternalTopics, true);
    Fetcher<String, String> fetcher = new Fetcher<>(loggerFactory, consumerClient, minBytes, maxBytes, maxWaitMs, fetchSize, maxPollRecords, checkCrcs, keyDeserializer, valueDeserializer, metadata, subscriptions, metrics, metricsRegistry.fetcherMetrics, time, retryBackoffMs, requestTimeoutMs, IsolationLevel.READ_UNCOMMITTED);
    return new KafkaConsumer<>(loggerFactory, clientId, consumerCoordinator, keyDeserializer, valueDeserializer, fetcher, interceptors, time, consumerClient, metrics, subscriptions, metadata, retryBackoffMs, requestTimeoutMs, assignors);
}
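The values these helpers hard-code correspond to public consumer configs. As a hedged sketch (not taken from the test), the same knobs could be set on a regular KafkaConsumer through ConsumerConfig keys; the broker address is an assumption, and the session/heartbeat/auto-commit settings that the helpers receive as parameters or test-class fields are shown with illustrative values.

import java.util.Properties;
import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.common.serialization.StringDeserializer;

public class MockConsumerConfigSketch {
    public static void main(String[] args) {
        Properties props = new Properties();
        props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092");    // assumption; the tests talk to a MockClient instead
        props.put(ConsumerConfig.CLIENT_ID_CONFIG, "mock-consumer");
        props.put(ConsumerConfig.GROUP_ID_CONFIG, "mock-group");
        props.put(ConsumerConfig.RETRY_BACKOFF_MS_CONFIG, 100);                  // retryBackoffMs
        props.put(ConsumerConfig.REQUEST_TIMEOUT_MS_CONFIG, 30000);              // requestTimeoutMs
        props.put(ConsumerConfig.EXCLUDE_INTERNAL_TOPICS_CONFIG, true);          // excludeInternalTopics
        props.put(ConsumerConfig.FETCH_MIN_BYTES_CONFIG, 1);                     // minBytes
        props.put(ConsumerConfig.FETCH_MAX_BYTES_CONFIG, Integer.MAX_VALUE);     // maxBytes
        props.put(ConsumerConfig.FETCH_MAX_WAIT_MS_CONFIG, 500);                 // maxWaitMs
        props.put(ConsumerConfig.MAX_PARTITION_FETCH_BYTES_CONFIG, 1024 * 1024); // fetchSize
        props.put(ConsumerConfig.MAX_POLL_RECORDS_CONFIG, Integer.MAX_VALUE);    // maxPollRecords
        props.put(ConsumerConfig.CHECK_CRCS_CONFIG, true);                       // checkCrcs
        props.put(ConsumerConfig.MAX_POLL_INTERVAL_MS_CONFIG, 60000);            // rebalanceTimeoutMs
        props.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest");          // OffsetResetStrategy.EARLIEST
        props.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, true);               // autoCommitEnabled (illustrative)
        props.put(ConsumerConfig.AUTO_COMMIT_INTERVAL_MS_CONFIG, 1000);          // autoCommitIntervalMs (illustrative)
        props.put(ConsumerConfig.SESSION_TIMEOUT_MS_CONFIG, 30000);              // sessionTimeoutMs (illustrative)
        props.put(ConsumerConfig.HEARTBEAT_INTERVAL_MS_CONFIG, 3000);            // heartbeatIntervalMs (illustrative)
        props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName());
        props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName());

        // Construction alone does not contact the broker, so this runs even without a cluster.
        try (KafkaConsumer<String, String> consumer = new KafkaConsumer<>(props)) {
            System.out.println("consumer configured with " + props.size() + " explicit settings");
        }
    }
}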
Use of org.apache.kafka.clients.consumer.internals.SubscriptionState in project kafka by apache.
In class KafkaConsumerTest, method testMeasureCommitSyncDuration:
@Test
public void testMeasureCommitSyncDuration() {
    // MockTime auto-advances by one second on every time lookup, so the measured
    // commitSync duration comes out to roughly one second in nanoseconds
    Time time = new MockTime(Duration.ofSeconds(1).toMillis());
    SubscriptionState subscription = new SubscriptionState(new LogContext(), OffsetResetStrategy.EARLIEST);
    ConsumerMetadata metadata = createMetadata(subscription);
    MockClient client = new MockClient(time, metadata);
    initMetadata(client, Collections.singletonMap(topic, 2));
    Node node = metadata.fetch().nodes().get(0);
    KafkaConsumer<String, String> consumer = newConsumer(time, client, subscription, metadata, assignor, true, groupInstanceId);
    consumer.assign(singletonList(tp0));
    client.prepareResponseFrom(FindCoordinatorResponse.prepareResponse(Errors.NONE, groupId, node), node);
    Node coordinator = new Node(Integer.MAX_VALUE - node.id(), node.host(), node.port());
    client.prepareResponseFrom(offsetCommitResponse(Collections.singletonMap(tp0, Errors.NONE)), coordinator);
    consumer.commitSync(Collections.singletonMap(tp0, new OffsetAndMetadata(10L)));
    final Metric metric = consumer.metrics().get(consumer.metrics.metricName("commit-sync-time-ns-total", "consumer-metrics"));
    assertTrue((Double) metric.metricValue() >= Duration.ofMillis(999).toNanos());
}
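The test reads the commit-sync-time-ns-total sensor through the package-private consumer.metrics field. In Kafka versions that expose this metric, the same value can be read through the public metrics() map; the following is a hedged sketch of that lookup, where the metric name and group come from the test above while the broker address, topic and group id are assumptions.

import java.util.Collections;
import java.util.Map;
import java.util.Properties;
import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.clients.consumer.OffsetAndMetadata;
import org.apache.kafka.common.Metric;
import org.apache.kafka.common.MetricName;
import org.apache.kafka.common.TopicPartition;
import org.apache.kafka.common.serialization.StringDeserializer;

public class CommitSyncDurationMetricSketch {
    public static void main(String[] args) {
        Properties props = new Properties();
        props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092"); // assumption
        props.put(ConsumerConfig.GROUP_ID_CONFIG, "mock-group");              // assumption
        props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName());
        props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName());

        try (KafkaConsumer<String, String> consumer = new KafkaConsumer<>(props)) {
            TopicPartition tp = new TopicPartition("test", 0);                // assumption
            consumer.assign(Collections.singletonList(tp));
            consumer.commitSync(Collections.singletonMap(tp, new OffsetAndMetadata(10L)));

            // consumer.metrics() is public; look the metric up by name and group instead of
            // going through the package-private consumer.metrics field used inside the test
            for (Map.Entry<MetricName, ? extends Metric> entry : consumer.metrics().entrySet()) {
                MetricName name = entry.getKey();
                if (name.name().equals("commit-sync-time-ns-total") && name.group().equals("consumer-metrics")) {
                    System.out.println("commitSync total time: " + entry.getValue().metricValue() + " ns");
                }
            }
        }
    }
}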
Use of org.apache.kafka.clients.consumer.internals.SubscriptionState in project kafka by apache.
In class KafkaConsumerTest, method testMissingOffsetNoResetPolicy:
@Test
public void testMissingOffsetNoResetPolicy() {
    // OffsetResetStrategy.NONE: a partition with no committed offset cannot be auto-reset
    SubscriptionState subscription = new SubscriptionState(new LogContext(), OffsetResetStrategy.NONE);
    ConsumerMetadata metadata = createMetadata(subscription);
    MockClient client = new MockClient(time, metadata);
    initMetadata(client, Collections.singletonMap(topic, 1));
    Node node = metadata.fetch().nodes().get(0);
    KafkaConsumer<String, String> consumer = newConsumer(time, client, subscription, metadata, assignor, true, groupId, groupInstanceId, false);
    consumer.assign(singletonList(tp0));
    client.prepareResponseFrom(FindCoordinatorResponse.prepareResponse(Errors.NONE, groupId, node), node);
    Node coordinator = new Node(Integer.MAX_VALUE - node.id(), node.host(), node.port());
    // lookup committed offset and find nothing
    client.prepareResponseFrom(offsetResponse(Collections.singletonMap(tp0, -1L), Errors.NONE), coordinator);
    assertThrows(NoOffsetForPartitionException.class, () -> consumer.poll(Duration.ZERO));
}
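The same contract holds at the public API level: with auto.offset.reset=none and no committed offset for the group, polling surfaces NoOffsetForPartitionException. A minimal sketch under that assumption follows; the broker address, topic and group id are illustrative.

import java.time.Duration;
import java.util.Collections;
import java.util.Properties;
import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.clients.consumer.NoOffsetForPartitionException;
import org.apache.kafka.common.TopicPartition;
import org.apache.kafka.common.serialization.StringDeserializer;

public class MissingOffsetNoResetPolicySketch {
    public static void main(String[] args) {
        Properties props = new Properties();
        props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092");   // assumption
        props.put(ConsumerConfig.GROUP_ID_CONFIG, "group-with-no-commits");     // assumption: group has never committed
        props.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "none");             // mirrors OffsetResetStrategy.NONE
        props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName());
        props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName());

        try (KafkaConsumer<String, String> consumer = new KafkaConsumer<>(props)) {
            consumer.assign(Collections.singletonList(new TopicPartition("test", 0))); // topic is an assumption
            // a non-zero timeout gives the committed-offset lookup time to complete against a live broker
            consumer.poll(Duration.ofSeconds(5));
        } catch (NoOffsetForPartitionException e) {
            System.out.println("no committed offset and no reset policy: " + e.getMessage());
        }
    }
}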
Use of org.apache.kafka.clients.consumer.internals.SubscriptionState in project kafka by apache.
In class KafkaConsumerTest, method testResetToCommittedOffset:
@Test
public void testResetToCommittedOffset() {
    SubscriptionState subscription = new SubscriptionState(new LogContext(), OffsetResetStrategy.NONE);
    ConsumerMetadata metadata = createMetadata(subscription);
    MockClient client = new MockClient(time, metadata);
    initMetadata(client, Collections.singletonMap(topic, 1));
    Node node = metadata.fetch().nodes().get(0);
    KafkaConsumer<String, String> consumer = newConsumer(time, client, subscription, metadata, assignor, true, groupId, groupInstanceId, false);
    consumer.assign(singletonList(tp0));
    client.prepareResponseFrom(FindCoordinatorResponse.prepareResponse(Errors.NONE, groupId, node), node);
    Node coordinator = new Node(Integer.MAX_VALUE - node.id(), node.host(), node.port());
    // lookup committed offset and find 539, which becomes the consumer's fetch position
    client.prepareResponseFrom(offsetResponse(Collections.singletonMap(tp0, 539L), Errors.NONE), coordinator);
    consumer.poll(Duration.ZERO);
    assertEquals(539L, consumer.position(tp0));
}
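The complementary case through the public API: once an offset has been committed for the group, a consumer configured with auto.offset.reset=none resumes from it instead of failing. A hedged sketch of that flow follows; the broker address, topic and group id are assumptions, and the committed offset 539 echoes the mocked response above.

import java.time.Duration;
import java.util.Collections;
import java.util.Properties;
import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.clients.consumer.OffsetAndMetadata;
import org.apache.kafka.common.TopicPartition;
import org.apache.kafka.common.serialization.StringDeserializer;

public class ResetToCommittedOffsetSketch {

    private static Properties config() {
        Properties props = new Properties();
        props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092"); // assumption
        props.put(ConsumerConfig.GROUP_ID_CONFIG, "mock-group");              // assumption
        props.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "none");           // mirrors OffsetResetStrategy.NONE
        props.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, false);
        props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName());
        props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName());
        return props;
    }

    public static void main(String[] args) {
        TopicPartition tp = new TopicPartition("test", 0); // assumption

        // A first consumer commits offset 539 for the partition, like the mocked OffsetFetch response.
        try (KafkaConsumer<String, String> first = new KafkaConsumer<>(config())) {
            first.assign(Collections.singletonList(tp));
            first.commitSync(Collections.singletonMap(tp, new OffsetAndMetadata(539L)));
        }

        // A second consumer in the same group has no reset policy, so it must fall back to the
        // committed offset; position() blocks until that offset is known and reports 539.
        try (KafkaConsumer<String, String> second = new KafkaConsumer<>(config())) {
            second.assign(Collections.singletonList(tp));
            second.poll(Duration.ZERO);
            System.out.println("position = " + second.position(tp)); // expected: 539
        }
    }
}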