Use of org.apache.kafka.clients.ApiVersions in project apache-kafka-on-k8s by banzaicloud.
Class RecordAccumulatorTest, method testRetryBackoff.
@Test
public void testRetryBackoff() throws Exception {
    long lingerMs = Long.MAX_VALUE / 4;
    long retryBackoffMs = Long.MAX_VALUE / 2;
    final RecordAccumulator accum = new RecordAccumulator(logContext, 1024 + DefaultRecordBatch.RECORD_BATCH_OVERHEAD, 10 * 1024,
            CompressionType.NONE, lingerMs, retryBackoffMs, metrics, time, new ApiVersions(), null);
    long now = time.milliseconds();
    accum.append(tp1, 0L, key, value, Record.EMPTY_HEADERS, null, maxBlockTimeMs);
    RecordAccumulator.ReadyCheckResult result = accum.ready(cluster, now + lingerMs + 1);
    assertEquals("Node1 should be ready", Collections.singleton(node1), result.readyNodes);
    Map<Integer, List<ProducerBatch>> batches = accum.drain(cluster, result.readyNodes, Integer.MAX_VALUE, now + lingerMs + 1);
    assertEquals("Node1 should be the only ready node.", 1, batches.size());
    assertEquals("Partition 0 should only have one batch drained.", 1, batches.get(0).size());
    // Re-enqueue the batch, as the sender would after a retriable failure
    now = time.milliseconds();
    accum.reenqueue(batches.get(0).get(0), now);
    // Put a message for partition 1 into the accumulator
    accum.append(tp2, 0L, key, value, Record.EMPTY_HEADERS, null, maxBlockTimeMs);
    result = accum.ready(cluster, now + lingerMs + 1);
    assertEquals("Node1 should be ready", Collections.singleton(node1), result.readyNodes);
    // tp1 should back off while tp2 should not
    batches = accum.drain(cluster, result.readyNodes, Integer.MAX_VALUE, now + lingerMs + 1);
    assertEquals("Node1 should be the only ready node.", 1, batches.size());
    assertEquals("Node1 should only have one batch drained.", 1, batches.get(0).size());
    assertEquals("Node1 should only have one batch for partition 1.", tp2, batches.get(0).get(0).topicPartition);
    // Partition 0 can be drained once the retry backoff has elapsed
    result = accum.ready(cluster, now + retryBackoffMs + 1);
    assertEquals("Node1 should be ready", Collections.singleton(node1), result.readyNodes);
    batches = accum.drain(cluster, result.readyNodes, Integer.MAX_VALUE, now + retryBackoffMs + 1);
    assertEquals("Node1 should be the only ready node.", 1, batches.size());
    assertEquals("Node1 should only have one batch drained.", 1, batches.get(0).size());
    assertEquals("Node1 should only have one batch for partition 0.", tp1, batches.get(0).get(0).topicPartition);
}
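The assertions hinge on a simple gating rule: a batch that has been re-enqueued after a failure must wait out retryBackoffMs, while a fresh batch only waits out lingerMs. A minimal sketch of that rule, with illustrative names rather than the exact RecordAccumulator internals (the real check in RecordAccumulator.ready also considers batch fullness, flushes, and memory exhaustion):

// Sketch only; names are illustrative, not the actual RecordAccumulator fields.
final class BackoffGateSketch {
    static boolean batchReady(int attempts, long lastAttemptMs,
                              long lingerMs, long retryBackoffMs, long nowMs) {
        // A re-enqueued batch (attempts > 0) whose backoff window has not yet
        // elapsed is backing off and must not be drained.
        boolean backingOff = attempts > 0 && lastAttemptMs + retryBackoffMs > nowMs;
        long timeToWaitMs = backingOff ? retryBackoffMs : lingerMs;
        return nowMs - lastAttemptMs >= timeToWaitMs;
    }
}

With lingerMs = Long.MAX_VALUE / 4 and retryBackoffMs = Long.MAX_VALUE / 2, a readiness check at now + lingerMs + 1 clears the linger wait but not the backoff window, which is why the re-enqueued tp1 batch is skipped until now + retryBackoffMs + 1.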
Use of org.apache.kafka.clients.ApiVersions in project apache-kafka-on-k8s by banzaicloud.
Class SenderTest, method testQuotaMetrics.
/*
 * Send multiple requests and verify that the client-side quota metrics have the right values.
 */
@Test
@SuppressWarnings("deprecation")
public void testQuotaMetrics() throws Exception {
    MockSelector selector = new MockSelector(time);
    Sensor throttleTimeSensor = Sender.throttleTimeSensor(this.senderMetricsRegistry);
    Cluster cluster = TestUtils.singletonCluster("test", 1);
    Node node = cluster.nodes().get(0);
    NetworkClient client = new NetworkClient(selector, metadata, "mock", Integer.MAX_VALUE, 1000, 1000, 64 * 1024, 64 * 1024, 1000,
            time, true, new ApiVersions(), throttleTimeSensor, logContext);
    // Complete the connection by answering the ApiVersions request; its response
    // carries a 400 ms throttle time, which the sensor records as well.
    short apiVersionsResponseVersion = ApiKeys.API_VERSIONS.latestVersion();
    ByteBuffer buffer = ApiVersionsResponse.createApiVersionsResponse(400, RecordBatch.CURRENT_MAGIC_VALUE).serialize(apiVersionsResponseVersion, new ResponseHeader(0));
    selector.delayedReceive(new DelayedReceive(node.idString(), new NetworkReceive(node.idString(), buffer)));
    while (!client.ready(node, time.milliseconds()))
        client.poll(1, time.milliseconds());
    selector.clear();
    for (int i = 1; i <= 3; i++) {
        int throttleTimeMs = 100 * i;
        ProduceRequest.Builder builder = ProduceRequest.Builder.forCurrentMagic((short) 1, 1000, Collections.<TopicPartition, MemoryRecords>emptyMap());
        ClientRequest request = client.newClientRequest(node.idString(), builder, time.milliseconds(), true, null);
        client.send(request, time.milliseconds());
        client.poll(1, time.milliseconds());
        ProduceResponse response = produceResponse(tp0, i, Errors.NONE, throttleTimeMs);
        buffer = response.serialize(ApiKeys.PRODUCE.latestVersion(), new ResponseHeader(request.correlationId()));
        selector.completeReceive(new NetworkReceive(node.idString(), buffer));
        client.poll(1, time.milliseconds());
        selector.clear();
    }
    Map<MetricName, KafkaMetric> allMetrics = metrics.metrics();
    KafkaMetric avgMetric = allMetrics.get(this.senderMetricsRegistry.produceThrottleTimeAvg);
    KafkaMetric maxMetric = allMetrics.get(this.senderMetricsRegistry.produceThrottleTimeMax);
    // Throttle times are ApiVersions=400, Produce=(100, 200, 300)
    assertEquals(250, avgMetric.value(), EPS);
    assertEquals(400, maxMetric.value(), EPS);
    client.close();
}
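The expected values follow from the four recorded samples: the 400 ms throttle on the ApiVersions response plus 100, 200, and 300 ms on the three Produce responses, so avg = (400 + 100 + 200 + 300) / 4 = 250 and max = 400. A standalone sketch of the same bookkeeping using Kafka's public Metrics/Sensor API (the sensor and metric names here are illustrative, not the ones the client registers):

import org.apache.kafka.common.MetricName;
import org.apache.kafka.common.metrics.Metrics;
import org.apache.kafka.common.metrics.Sensor;
import org.apache.kafka.common.metrics.stats.Avg;
import org.apache.kafka.common.metrics.stats.Max;

public class ThrottleTimeMetricsDemo {
    public static void main(String[] args) {
        Metrics metrics = new Metrics();
        Sensor throttle = metrics.sensor("throttle-time-demo");
        MetricName avgName = metrics.metricName("throttle-time-avg", "demo-group");
        MetricName maxName = metrics.metricName("throttle-time-max", "demo-group");
        throttle.add(avgName, new Avg());
        throttle.add(maxName, new Max());
        // The same samples the test produces: 400 from the ApiVersions exchange,
        // then 100, 200, 300 from the three Produce responses.
        for (int throttleTimeMs : new int[] {400, 100, 200, 300})
            throttle.record(throttleTimeMs);
        System.out.println(metrics.metrics().get(avgName).metricValue()); // 250.0
        System.out.println(metrics.metrics().get(maxName).metricValue()); // 400.0
        metrics.close();
    }
}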
Use of org.apache.kafka.clients.ApiVersions in project apache-kafka-on-k8s by banzaicloud.
Class FetcherTest, method testQuotaMetrics.
/*
 * Send multiple requests and verify that the client-side quota metrics have the right values.
 */
@Test
public void testQuotaMetrics() throws Exception {
    MockSelector selector = new MockSelector(time);
    Sensor throttleTimeSensor = Fetcher.throttleTimeSensor(metrics, metricsRegistry);
    Cluster cluster = TestUtils.singletonCluster("test", 1);
    Node node = cluster.nodes().get(0);
    NetworkClient client = new NetworkClient(selector, metadata, "mock", Integer.MAX_VALUE, 1000, 1000, 64 * 1024, 64 * 1024, 1000,
            time, true, new ApiVersions(), throttleTimeSensor, new LogContext());
    short apiVersionsResponseVersion = ApiKeys.API_VERSIONS.latestVersion();
    ByteBuffer buffer = ApiVersionsResponse.createApiVersionsResponse(400, RecordBatch.CURRENT_MAGIC_VALUE).serialize(apiVersionsResponseVersion, new ResponseHeader(0));
    selector.delayedReceive(new DelayedReceive(node.idString(), new NetworkReceive(node.idString(), buffer)));
    while (!client.ready(node, time.milliseconds()))
        client.poll(1, time.milliseconds());
    selector.clear();
    for (int i = 1; i <= 3; i++) {
        int throttleTimeMs = 100 * i;
        FetchRequest.Builder builder = FetchRequest.Builder.forConsumer(100, 100, new LinkedHashMap<TopicPartition, PartitionData>());
        ClientRequest request = client.newClientRequest(node.idString(), builder, time.milliseconds(), true, null);
        client.send(request, time.milliseconds());
        client.poll(1, time.milliseconds());
        FetchResponse response = fullFetchResponse(tp0, nextRecords, Errors.NONE, i, throttleTimeMs);
        buffer = response.serialize(ApiKeys.FETCH.latestVersion(), new ResponseHeader(request.correlationId()));
        selector.completeReceive(new NetworkReceive(node.idString(), buffer));
        client.poll(1, time.milliseconds());
        selector.clear();
    }
    Map<MetricName, KafkaMetric> allMetrics = metrics.metrics();
    KafkaMetric avgMetric = allMetrics.get(metrics.metricInstance(metricsRegistry.fetchThrottleTimeAvg));
    KafkaMetric maxMetric = allMetrics.get(metrics.metricInstance(metricsRegistry.fetchThrottleTimeMax));
    // Throttle times are ApiVersions=400, Fetch=(100, 200, 300)
    assertEquals(250, avgMetric.value(), EPSILON);
    assertEquals(400, maxMetric.value(), EPSILON);
    client.close();
}
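This mirrors the Sender test above; only the request type and metric names differ, and the expected avg and max work out the same way. Both tests share the same connection-priming step, which could be factored into a helper like the following hypothetical one (not part of either test class; MockSelector, DelayedReceive, and NetworkReceive are the same test utilities used above):

// Hypothetical helper: queue the serialized ApiVersionsResponse so that
// NetworkClient.ready(...) can complete the connection handshake, then
// clear the selector before sending the requests under test.
static void primeConnection(MockSelector selector, NetworkClient client, Node node,
                            ByteBuffer serializedApiVersionsResponse, Time time) {
    selector.delayedReceive(new DelayedReceive(node.idString(),
            new NetworkReceive(node.idString(), serializedApiVersionsResponse)));
    while (!client.ready(node, time.milliseconds()))
        client.poll(1, time.milliseconds());
    selector.clear();
}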
Use of org.apache.kafka.clients.ApiVersions in project kafka by apache.
Class FetcherTest, method testFetcherConcurrency.
@Test
public void testFetcherConcurrency() throws Exception {
    int numPartitions = 20;
    Set<TopicPartition> topicPartitions = new HashSet<>();
    for (int i = 0; i < numPartitions; i++)
        topicPartitions.add(new TopicPartition(topicName, i));
    LogContext logContext = new LogContext();
    buildDependencies(new MetricConfig(), Long.MAX_VALUE, new SubscriptionState(logContext, OffsetResetStrategy.EARLIEST), logContext);
    fetcher = new Fetcher<byte[], byte[]>(new LogContext(), consumerClient, minBytes, maxBytes, maxWaitMs, fetchSize,
            2 * numPartitions, true, "", new ByteArrayDeserializer(), new ByteArrayDeserializer(), metadata, subscriptions,
            metrics, metricsRegistry, time, retryBackoffMs, requestTimeoutMs, IsolationLevel.READ_UNCOMMITTED, apiVersions) {
        @Override
        protected FetchSessionHandler sessionHandler(int id) {
            final FetchSessionHandler handler = super.sessionHandler(id);
            if (handler == null)
                return null;
            else {
                return new FetchSessionHandler(new LogContext(), id) {
                    @Override
                    public Builder newBuilder() {
                        verifySessionPartitions();
                        return handler.newBuilder();
                    }

                    @Override
                    public boolean handleResponse(FetchResponse response, short version) {
                        verifySessionPartitions();
                        return handler.handleResponse(response, version);
                    }

                    @Override
                    public void handleError(Throwable t) {
                        verifySessionPartitions();
                        handler.handleError(t);
                    }

                    // Verify that session partitions can be traversed safely.
                    private void verifySessionPartitions() {
                        try {
                            Field field = FetchSessionHandler.class.getDeclaredField("sessionPartitions");
                            field.setAccessible(true);
                            LinkedHashMap<?, ?> sessionPartitions = (LinkedHashMap<?, ?>) field.get(handler);
                            for (Map.Entry<?, ?> entry : sessionPartitions.entrySet()) {
                                // If `sessionPartitions` is modified on another thread, Thread.yield increases the
                                // chance of a ConcurrentModificationException when synchronization is missing.
                                Thread.yield();
                            }
                        } catch (Exception e) {
                            throw new RuntimeException(e);
                        }
                    }
                };
            }
        }
    };
    MetadataResponse initialMetadataResponse = RequestTestUtils.metadataUpdateWithIds(1, singletonMap(topicName, numPartitions), tp -> validLeaderEpoch, topicIds);
    client.updateMetadata(initialMetadataResponse);
    fetchSize = 10000;
    assignFromUser(topicPartitions);
    topicPartitions.forEach(tp -> subscriptions.seek(tp, 0L));
    AtomicInteger fetchesRemaining = new AtomicInteger(1000);
    executorService = Executors.newSingleThreadExecutor();
    Future<?> future = executorService.submit(() -> {
        while (fetchesRemaining.get() > 0) {
            synchronized (consumerClient) {
                if (!client.requests().isEmpty()) {
                    ClientRequest request = client.requests().peek();
                    FetchRequest fetchRequest = (FetchRequest) request.requestBuilder().build();
                    LinkedHashMap<TopicIdPartition, FetchResponseData.PartitionData> responseMap = new LinkedHashMap<>();
                    for (Map.Entry<TopicIdPartition, FetchRequest.PartitionData> entry : fetchRequest.fetchData(topicNames).entrySet()) {
                        TopicIdPartition tp = entry.getKey();
                        long offset = entry.getValue().fetchOffset;
                        responseMap.put(tp, new FetchResponseData.PartitionData()
                                .setPartitionIndex(tp.topicPartition().partition())
                                .setHighWatermark(offset + 2)
                                .setLastStableOffset(offset + 2)
                                .setLogStartOffset(0)
                                .setRecords(buildRecords(offset, 2, offset)));
                    }
                    client.respondToRequest(request, FetchResponse.of(Errors.NONE, 0, 123, responseMap));
                    consumerClient.poll(time.timer(0));
                }
            }
        }
        return fetchesRemaining.get();
    });
    Map<TopicPartition, Long> nextFetchOffsets = topicPartitions.stream().collect(Collectors.toMap(Function.identity(), t -> 0L));
    while (fetchesRemaining.get() > 0 && !future.isDone()) {
        if (fetcher.sendFetches() == 1) {
            synchronized (consumerClient) {
                consumerClient.poll(time.timer(0));
            }
        }
        if (fetcher.hasCompletedFetches()) {
            Map<TopicPartition, List<ConsumerRecord<byte[], byte[]>>> fetchedRecords = fetchedRecords();
            if (!fetchedRecords.isEmpty()) {
                fetchesRemaining.decrementAndGet();
                fetchedRecords.forEach((tp, records) -> {
                    assertEquals(2, records.size());
                    long nextOffset = nextFetchOffsets.get(tp);
                    assertEquals(nextOffset, records.get(0).offset());
                    assertEquals(nextOffset + 1, records.get(1).offset());
                    nextFetchOffsets.put(tp, nextOffset + 2);
                });
            }
        }
    }
    assertEquals(0, future.get());
}
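The Thread.yield trick in verifySessionPartitions works because LinkedHashMap iterators are fail-fast: an unsynchronized structural modification during traversal usually surfaces as a ConcurrentModificationException. A self-contained illustration of that behavior, independent of Kafka (the race is probabilistic, so a clean run is also possible):

import java.util.LinkedHashMap;
import java.util.Map;

public class FailFastIterationDemo {
    public static void main(String[] args) throws InterruptedException {
        Map<Integer, Integer> map = new LinkedHashMap<>();
        for (int i = 0; i < 1_000; i++)
            map.put(i, i);
        // Structural modifications from another thread, with no synchronization.
        Thread writer = new Thread(() -> {
            for (int i = 1_000; i < 100_000; i++)
                map.put(i, i);
        });
        writer.start();
        try {
            for (Map.Entry<Integer, Integer> entry : map.entrySet())
                Thread.yield(); // widen the race window, as verifySessionPartitions does
            System.out.println("No exception this run; the race is probabilistic.");
        } catch (java.util.ConcurrentModificationException e) {
            System.out.println("Fail-fast iterator detected the concurrent modification.");
        }
        writer.join();
    }
}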
Use of org.apache.kafka.clients.ApiVersions in project kafka by apache.
Class KafkaConsumerTest, method newConsumer.
private KafkaConsumer<String, String> newConsumer(Time time, KafkaClient client, SubscriptionState subscription,
        ConsumerMetadata metadata, ConsumerPartitionAssignor assignor, boolean autoCommitEnabled, String groupId,
        Optional<String> groupInstanceId, Optional<Deserializer<String>> valueDeserializer, boolean throwOnStableOffsetNotSupported) {
    String clientId = "mock-consumer";
    String metricGroupPrefix = "consumer";
    long retryBackoffMs = 100;
    int minBytes = 1;
    int maxBytes = Integer.MAX_VALUE;
    int maxWaitMs = 500;
    int fetchSize = 1024 * 1024;
    int maxPollRecords = Integer.MAX_VALUE;
    boolean checkCrcs = true;
    int rebalanceTimeoutMs = 60000;
    Deserializer<String> keyDeserializer = new StringDeserializer();
    Deserializer<String> deserializer = valueDeserializer.orElse(new StringDeserializer());
    List<ConsumerPartitionAssignor> assignors = singletonList(assignor);
    ConsumerInterceptors<String, String> interceptors = new ConsumerInterceptors<>(Collections.emptyList());
    Metrics metrics = new Metrics(time);
    ConsumerMetrics metricsRegistry = new ConsumerMetrics(metricGroupPrefix);
    LogContext loggerFactory = new LogContext();
    ConsumerNetworkClient consumerClient = new ConsumerNetworkClient(loggerFactory, client, metadata, time, retryBackoffMs, requestTimeoutMs, heartbeatIntervalMs);
    // A coordinator is only created when a group id is supplied; group-less consumers run without one.
    ConsumerCoordinator consumerCoordinator = null;
    if (groupId != null) {
        GroupRebalanceConfig rebalanceConfig = new GroupRebalanceConfig(sessionTimeoutMs, rebalanceTimeoutMs, heartbeatIntervalMs, groupId, groupInstanceId, retryBackoffMs, true);
        consumerCoordinator = new ConsumerCoordinator(rebalanceConfig, loggerFactory, consumerClient, assignors, metadata, subscription,
                metrics, metricGroupPrefix, time, autoCommitEnabled, autoCommitIntervalMs, interceptors, throwOnStableOffsetNotSupported);
    }
    Fetcher<String, String> fetcher = new Fetcher<>(loggerFactory, consumerClient, minBytes, maxBytes, maxWaitMs, fetchSize,
            maxPollRecords, checkCrcs, "", keyDeserializer, deserializer, metadata, subscription, metrics,
            metricsRegistry.fetcherMetrics, time, retryBackoffMs, requestTimeoutMs, IsolationLevel.READ_UNCOMMITTED, new ApiVersions());
    return new KafkaConsumer<>(loggerFactory, clientId, consumerCoordinator, keyDeserializer, deserializer, fetcher, interceptors,
            time, consumerClient, metrics, subscription, metadata, retryBackoffMs, requestTimeoutMs, defaultApiTimeoutMs, assignors, groupId);
}
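A hypothetical call site for the helper above, matching its signature (the group id and flag values here are made up for illustration):

KafkaConsumer<String, String> consumer = newConsumer(
        time, client, subscription, metadata, assignor,
        true,              // autoCommitEnabled
        "test-group",      // groupId; non-null, so a ConsumerCoordinator is created
        Optional.empty(),  // dynamic group membership (no group instance id)
        Optional.empty(),  // fall back to StringDeserializer for values
        false);            // throwOnStableOffsetNotSupported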