use of org.apache.kafka.clients.ClientRequest in project kafka by apache.
the class Sender method maybeSendAndPollTransactionalRequest.
/**
 * Returns true if a transactional request is sent or polled, or if a FindCoordinator request is enqueued.
 */
private boolean maybeSendAndPollTransactionalRequest() {
    if (transactionManager.hasInFlightRequest()) {
        // As long as there are outstanding transactional requests, we simply wait for them to return.
        client.poll(retryBackoffMs, time.milliseconds());
        return true;
    }
    if (transactionManager.hasAbortableError() || transactionManager.isAborting()) {
        if (accumulator.hasIncomplete()) {
            // Attempt to get the last error that caused this abort.
            RuntimeException exception = transactionManager.lastError();
            // If there was no error, but we are still aborting,
            // then this is most likely a case where there was no fatal error.
            if (exception == null) {
                exception = new TransactionAbortedException();
            }
            accumulator.abortUndrainedBatches(exception);
        }
    }
    TransactionManager.TxnRequestHandler nextRequestHandler = transactionManager.nextRequest(accumulator.hasIncomplete());
    if (nextRequestHandler == null)
        return false;
    AbstractRequest.Builder<?> requestBuilder = nextRequestHandler.requestBuilder();
    Node targetNode = null;
    try {
        FindCoordinatorRequest.CoordinatorType coordinatorType = nextRequestHandler.coordinatorType();
        targetNode = coordinatorType != null
                ? transactionManager.coordinator(coordinatorType)
                : client.leastLoadedNode(time.milliseconds());
        if (targetNode != null) {
            if (!awaitNodeReady(targetNode, coordinatorType)) {
                log.trace("Target node {} not ready within request timeout, will retry when node is ready.", targetNode);
                maybeFindCoordinatorAndRetry(nextRequestHandler);
                return true;
            }
        } else if (coordinatorType != null) {
            log.trace("Coordinator not known for {}, will retry {} after finding coordinator.", coordinatorType, requestBuilder.apiKey());
            maybeFindCoordinatorAndRetry(nextRequestHandler);
            return true;
        } else {
            log.trace("No nodes available to send requests, will poll and retry until a node is ready.");
            transactionManager.retry(nextRequestHandler);
            client.poll(retryBackoffMs, time.milliseconds());
            return true;
        }
        if (nextRequestHandler.isRetry())
            time.sleep(nextRequestHandler.retryBackoffMs());
        long currentTimeMs = time.milliseconds();
        ClientRequest clientRequest = client.newClientRequest(targetNode.idString(), requestBuilder, currentTimeMs, true, requestTimeoutMs, nextRequestHandler);
        log.debug("Sending transactional request {} to node {} with correlation ID {}", requestBuilder, targetNode, clientRequest.correlationId());
        client.send(clientRequest, currentTimeMs);
        transactionManager.setInFlightCorrelationId(clientRequest.correlationId());
        client.poll(retryBackoffMs, time.milliseconds());
        return true;
    } catch (IOException e) {
        log.debug("Disconnected from {} while trying to send request {}. Going to back off and retry.", targetNode, requestBuilder, e);
        // We break here so that we pick up the FindCoordinator request immediately.
        maybeFindCoordinatorAndRetry(nextRequestHandler);
        return true;
    }
}
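The ClientRequest lifecycle at the heart of this method is newClientRequest, then send, then poll. Here is a minimal sketch of that same pattern against the generic KafkaClient interface; the class name BlockingRequestSender and the busy-poll loop are illustrative assumptions, not Kafka source, and the client is assumed to be already configured and connected to the target node:

import java.util.List;
import org.apache.kafka.clients.ClientRequest;
import org.apache.kafka.clients.ClientResponse;
import org.apache.kafka.clients.KafkaClient;
import org.apache.kafka.common.Node;
import org.apache.kafka.common.requests.AbstractRequest;
import org.apache.kafka.common.utils.Time;

// Minimal sketch (not Kafka source): the send-then-poll request pattern used above.
final class BlockingRequestSender {

    private final KafkaClient client; // assumed already connected and ready
    private final Time time;

    BlockingRequestSender(KafkaClient client, Time time) {
        this.client = client;
        this.time = time;
    }

    // Sends one request to the given node and polls until its response arrives.
    ClientResponse sendAndAwait(Node node, AbstractRequest.Builder<?> builder, int requestTimeoutMs) {
        long now = time.milliseconds();
        // expectResponse=true: poll() will surface the matching ClientResponse.
        ClientRequest request = client.newClientRequest(
                node.idString(), builder, now, true, requestTimeoutMs, null);
        client.send(request, now);
        while (true) {
            List<ClientResponse> responses = client.poll(100, time.milliseconds());
            for (ClientResponse response : responses) {
                // Match the response to our request by correlation ID, as Sender does.
                if (response.requestHeader().correlationId() == request.correlationId())
                    return response;
            }
        }
    }
}

The Sender avoids blocking like this by registering nextRequestHandler as the request's completion callback instead; the sketch only makes the correlation-ID bookkeeping explicit.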
use of org.apache.kafka.clients.ClientRequest in project kafka by apache.
the class FetcherTest method testFetcherConcurrency.
@Test
public void testFetcherConcurrency() throws Exception {
    int numPartitions = 20;
    Set<TopicPartition> topicPartitions = new HashSet<>();
    for (int i = 0; i < numPartitions; i++)
        topicPartitions.add(new TopicPartition(topicName, i));
    LogContext logContext = new LogContext();
    buildDependencies(new MetricConfig(), Long.MAX_VALUE, new SubscriptionState(logContext, OffsetResetStrategy.EARLIEST), logContext);
    fetcher = new Fetcher<byte[], byte[]>(new LogContext(), consumerClient, minBytes, maxBytes, maxWaitMs, fetchSize,
            2 * numPartitions, true, "", new ByteArrayDeserializer(), new ByteArrayDeserializer(), metadata, subscriptions,
            metrics, metricsRegistry, time, retryBackoffMs, requestTimeoutMs, IsolationLevel.READ_UNCOMMITTED, apiVersions) {

        @Override
        protected FetchSessionHandler sessionHandler(int id) {
            final FetchSessionHandler handler = super.sessionHandler(id);
            if (handler == null)
                return null;
            else {
                return new FetchSessionHandler(new LogContext(), id) {

                    @Override
                    public Builder newBuilder() {
                        verifySessionPartitions();
                        return handler.newBuilder();
                    }

                    @Override
                    public boolean handleResponse(FetchResponse response, short version) {
                        verifySessionPartitions();
                        return handler.handleResponse(response, version);
                    }

                    @Override
                    public void handleError(Throwable t) {
                        verifySessionPartitions();
                        handler.handleError(t);
                    }

                    // Verify that session partitions can be traversed safely.
                    private void verifySessionPartitions() {
                        try {
                            Field field = FetchSessionHandler.class.getDeclaredField("sessionPartitions");
                            field.setAccessible(true);
                            LinkedHashMap<?, ?> sessionPartitions = (LinkedHashMap<?, ?>) field.get(handler);
                            for (Map.Entry<?, ?> entry : sessionPartitions.entrySet()) {
                                // If `sessionPartitions` are modified on another thread, Thread.yield will increase the
                                // possibility of ConcurrentModificationException if appropriate synchronization is not used.
                                Thread.yield();
                            }
                        } catch (Exception e) {
                            throw new RuntimeException(e);
                        }
                    }
                };
            }
        }
    };
    MetadataResponse initialMetadataResponse = RequestTestUtils.metadataUpdateWithIds(1, singletonMap(topicName, numPartitions), tp -> validLeaderEpoch, topicIds);
    client.updateMetadata(initialMetadataResponse);
    fetchSize = 10000;
    assignFromUser(topicPartitions);
    topicPartitions.forEach(tp -> subscriptions.seek(tp, 0L));
    AtomicInteger fetchesRemaining = new AtomicInteger(1000);
    executorService = Executors.newSingleThreadExecutor();
    Future<?> future = executorService.submit(() -> {
        while (fetchesRemaining.get() > 0) {
            synchronized (consumerClient) {
                if (!client.requests().isEmpty()) {
                    ClientRequest request = client.requests().peek();
                    FetchRequest fetchRequest = (FetchRequest) request.requestBuilder().build();
                    LinkedHashMap<TopicIdPartition, FetchResponseData.PartitionData> responseMap = new LinkedHashMap<>();
                    for (Map.Entry<TopicIdPartition, FetchRequest.PartitionData> entry : fetchRequest.fetchData(topicNames).entrySet()) {
                        TopicIdPartition tp = entry.getKey();
                        long offset = entry.getValue().fetchOffset;
                        responseMap.put(tp, new FetchResponseData.PartitionData()
                                .setPartitionIndex(tp.topicPartition().partition())
                                .setHighWatermark(offset + 2)
                                .setLastStableOffset(offset + 2)
                                .setLogStartOffset(0)
                                .setRecords(buildRecords(offset, 2, offset)));
                    }
                    client.respondToRequest(request, FetchResponse.of(Errors.NONE, 0, 123, responseMap));
                    consumerClient.poll(time.timer(0));
                }
            }
        }
        return fetchesRemaining.get();
    });
    Map<TopicPartition, Long> nextFetchOffsets = topicPartitions.stream().collect(Collectors.toMap(Function.identity(), t -> 0L));
    while (fetchesRemaining.get() > 0 && !future.isDone()) {
        if (fetcher.sendFetches() == 1) {
            synchronized (consumerClient) {
                consumerClient.poll(time.timer(0));
            }
        }
        if (fetcher.hasCompletedFetches()) {
            Map<TopicPartition, List<ConsumerRecord<byte[], byte[]>>> fetchedRecords = fetchedRecords();
            if (!fetchedRecords.isEmpty()) {
                fetchesRemaining.decrementAndGet();
                fetchedRecords.forEach((tp, records) -> {
                    assertEquals(2, records.size());
                    long nextOffset = nextFetchOffsets.get(tp);
                    assertEquals(nextOffset, records.get(0).offset());
                    assertEquals(nextOffset + 1, records.get(1).offset());
                    nextFetchOffsets.put(tp, nextOffset + 2);
                });
            }
        }
    }
    assertEquals(0, future.get());
}
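The verifySessionPartitions() override above relies on plain Java reflection to reach the handler's private sessionPartitions map. A hypothetical general-purpose version of that trick (the class and method names here are mine, not from the Kafka test):

import java.lang.reflect.Field;

// Hypothetical test utility showing the reflection pattern used by verifySessionPartitions():
// read a private field so a test can iterate internal state that the public API hides.
final class TestReflection {

    static Object readPrivateField(Class<?> declaringClass, Object target, String fieldName) {
        try {
            Field field = declaringClass.getDeclaredField(fieldName);
            // Test-only escape hatch: bypass the private modifier.
            field.setAccessible(true);
            return field.get(target);
        } catch (ReflectiveOperationException e) {
            throw new RuntimeException("Cannot read field '" + fieldName + "'", e);
        }
    }
}

In the test, the Thread.yield() inside the iteration deliberately widens the race window: if the fetcher mutated sessionPartitions without proper synchronization, the traversal would be far more likely to fail with a ConcurrentModificationException.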
use of org.apache.kafka.clients.ClientRequest in project kafka by apache.
the class KafkaConsumerTest method testSubscriptionChangesWithAutoCommitDisabled.
/**
 * Verify that when a consumer changes its topic subscription its assigned partitions
 * do not immediately change, and the consumed offsets of its to-be-revoked partitions
 * are not committed (when auto-commit is disabled).
 * Upon unsubscribing from subscribed topics, the assigned partitions immediately
 * change, but if auto-commit is disabled the consumer offsets are not committed.
 */
@Test
public void testSubscriptionChangesWithAutoCommitDisabled() {
    ConsumerMetadata metadata = createMetadata(subscription);
    MockClient client = new MockClient(time, metadata);
    Map<String, Integer> tpCounts = new HashMap<>();
    tpCounts.put(topic, 1);
    tpCounts.put(topic2, 1);
    initMetadata(client, tpCounts);
    Node node = metadata.fetch().nodes().get(0);
    ConsumerPartitionAssignor assignor = new RangeAssignor();
    KafkaConsumer<String, String> consumer = newConsumer(time, client, subscription, metadata, assignor, false, groupInstanceId);
    initializeSubscriptionWithSingleTopic(consumer, getConsumerRebalanceListener(consumer));
    // mock rebalance responses
    prepareRebalance(client, node, assignor, singletonList(tp0), null);
    consumer.updateAssignmentMetadataIfNeeded(time.timer(Long.MAX_VALUE));
    consumer.poll(Duration.ZERO);
    // verify that the subscription is still the same, and that the assignment has now caught up
    assertEquals(singleton(topic), consumer.subscription());
    assertEquals(singleton(tp0), consumer.assignment());
    consumer.poll(Duration.ZERO);
    // subscription change
    consumer.subscribe(singleton(topic2), getConsumerRebalanceListener(consumer));
    // verify that the subscription has changed but the assignment is still unchanged
    assertEquals(singleton(topic2), consumer.subscription());
    assertEquals(singleton(tp0), consumer.assignment());
    // auto-commit is disabled, so no offset commit request should be sent
    for (ClientRequest req : client.requests())
        assertNotSame(ApiKeys.OFFSET_COMMIT, req.requestBuilder().apiKey());
    // unsubscribe
    consumer.unsubscribe();
    // verify that subscription and assignment are both updated
    assertEquals(Collections.emptySet(), consumer.subscription());
    assertEquals(Collections.emptySet(), consumer.assignment());
    // auto-commit is disabled, so no offset commit request should be sent
    for (ClientRequest req : client.requests())
        assertNotSame(ApiKeys.OFFSET_COMMIT, req.requestBuilder().apiKey());
    client.requests().clear();
    consumer.close();
}
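The two identical loops over client.requests() could be factored into a small helper. A sketch, assuming JUnit 5 assertions and Kafka's MockClient test utility (the helper class and method names are illustrative, not part of the test):

import org.apache.kafka.clients.ClientRequest;
import org.apache.kafka.clients.MockClient;
import org.apache.kafka.common.protocol.ApiKeys;

import static org.junit.jupiter.api.Assertions.assertNotSame;

final class RequestAssertions {

    // Fail if the MockClient ever sent a request with the given API key; used here to
    // prove that no OFFSET_COMMIT request was issued while auto-commit is disabled.
    static void assertNoRequestOfType(MockClient client, ApiKeys apiKey) {
        for (ClientRequest request : client.requests())
            assertNotSame(apiKey, request.requestBuilder().apiKey());
    }
}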
use of org.apache.kafka.clients.ClientRequest in project kafka by apache.
the class SenderTest method testQuotaMetrics.
/*
 * Send multiple requests. Verify that the client-side quota metrics have the right values.
 */
@SuppressWarnings("deprecation")
@Test
public void testQuotaMetrics() {
    MockSelector selector = new MockSelector(time);
    Sensor throttleTimeSensor = Sender.throttleTimeSensor(this.senderMetricsRegistry);
    Cluster cluster = TestUtils.singletonCluster("test", 1);
    Node node = cluster.nodes().get(0);
    NetworkClient client = new NetworkClient(selector, metadata, "mock", Integer.MAX_VALUE, 1000, 1000,
            64 * 1024, 64 * 1024, 1000, 10 * 1000, 127 * 1000, time, true, new ApiVersions(), throttleTimeSensor, logContext);
    ApiVersionsResponse apiVersionsResponse = ApiVersionsResponse.defaultApiVersionsResponse(400, ApiMessageType.ListenerType.ZK_BROKER);
    ByteBuffer buffer = RequestTestUtils.serializeResponseWithHeader(apiVersionsResponse, ApiKeys.API_VERSIONS.latestVersion(), 0);
    selector.delayedReceive(new DelayedReceive(node.idString(), new NetworkReceive(node.idString(), buffer)));
    while (!client.ready(node, time.milliseconds())) {
        client.poll(1, time.milliseconds());
        // If a throttled response is received, advance the time to ensure progress.
        time.sleep(client.throttleDelayMs(node, time.milliseconds()));
    }
    selector.clear();
    for (int i = 1; i <= 3; i++) {
        int throttleTimeMs = 100 * i;
        ProduceRequest.Builder builder = ProduceRequest.forCurrentMagic(new ProduceRequestData()
                .setTopicData(new ProduceRequestData.TopicProduceDataCollection())
                .setAcks((short) 1)
                .setTimeoutMs(1000));
        ClientRequest request = client.newClientRequest(node.idString(), builder, time.milliseconds(), true);
        client.send(request, time.milliseconds());
        client.poll(1, time.milliseconds());
        ProduceResponse response = produceResponse(tp0, i, Errors.NONE, throttleTimeMs);
        buffer = RequestTestUtils.serializeResponseWithHeader(response, ApiKeys.PRODUCE.latestVersion(), request.correlationId());
        selector.completeReceive(new NetworkReceive(node.idString(), buffer));
        client.poll(1, time.milliseconds());
        // If a throttled response is received, advance the time to ensure progress.
        time.sleep(client.throttleDelayMs(node, time.milliseconds()));
        selector.clear();
    }
    Map<MetricName, KafkaMetric> allMetrics = metrics.metrics();
    KafkaMetric avgMetric = allMetrics.get(this.senderMetricsRegistry.produceThrottleTimeAvg);
    KafkaMetric maxMetric = allMetrics.get(this.senderMetricsRegistry.produceThrottleTimeMax);
    // Throttle times are ApiVersions=400, Produce=(100, 200, 300)
    assertEquals(250, (Double) avgMetric.metricValue(), EPS);
    assertEquals(400, (Double) maxMetric.metricValue(), EPS);
    client.close();
}
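The expected values in the two assertions follow directly from the four throttled responses the test produces: the throttle-time sensor records the ApiVersions throttle (400 ms) and the three Produce throttles (100, 200, 300 ms). A worked check of the arithmetic (plain Java, not test code):

// Worked check of the asserted metric values from the four recorded throttle times.
public class ThrottleTimeCheck {
    public static void main(String[] args) {
        double avg = (400 + 100 + 200 + 300) / 4.0;                    // 250.0 -> produce-throttle-time-avg
        double max = Math.max(400, Math.max(100, Math.max(200, 300))); // 400.0 -> produce-throttle-time-max
        System.out.println("avg=" + avg + ", max=" + max);
    }
}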
use of org.apache.kafka.clients.ClientRequest in project kafka by apache.
the class SenderTest method testCorrectHandlingOfDuplicateSequenceError.
@Test
public void testCorrectHandlingOfDuplicateSequenceError() throws Exception {
    final long producerId = 343434L;
    TransactionManager transactionManager = createTransactionManager();
    setupWithTransactionState(transactionManager);
    prepareAndReceiveInitProducerId(producerId, Errors.NONE);
    assertTrue(transactionManager.hasProducerId());
    assertEquals(0, transactionManager.sequenceNumber(tp0).longValue());
    // Send first ProduceRequest
    Future<RecordMetadata> request1 = appendToAccumulator(tp0);
    sender.runOnce();
    String nodeId = client.requests().peek().destination();
    Node node = new Node(Integer.valueOf(nodeId), "localhost", 0);
    assertEquals(1, client.inFlightRequestCount());
    assertEquals(1, transactionManager.sequenceNumber(tp0).longValue());
    assertEquals(OptionalInt.empty(), transactionManager.lastAckedSequence(tp0));
    // Send second ProduceRequest
    Future<RecordMetadata> request2 = appendToAccumulator(tp0);
    sender.runOnce();
    assertEquals(2, client.inFlightRequestCount());
    assertEquals(2, transactionManager.sequenceNumber(tp0).longValue());
    assertEquals(OptionalInt.empty(), transactionManager.lastAckedSequence(tp0));
    assertFalse(request1.isDone());
    assertFalse(request2.isDone());
    assertTrue(client.isReady(node, time.milliseconds()));
    ClientRequest firstClientRequest = client.requests().peek();
    ClientRequest secondClientRequest = (ClientRequest) client.requests().toArray()[1];
    client.respondToRequest(secondClientRequest, produceResponse(tp0, 1000, Errors.NONE, 0));
    // receive the response for the second request (sequence 1) first
    sender.runOnce();
    assertEquals(OptionalLong.of(1000), transactionManager.lastAckedOffset(tp0));
    assertEquals(OptionalInt.of(1), transactionManager.lastAckedSequence(tp0));
    client.respondToRequest(firstClientRequest, produceResponse(tp0, ProduceResponse.INVALID_OFFSET, Errors.DUPLICATE_SEQUENCE_NUMBER, 0));
    // receive the duplicate-sequence response for the first request (sequence 0)
    sender.runOnce();
    // Make sure that the last ack'd sequence doesn't change.
    assertEquals(OptionalInt.of(1), transactionManager.lastAckedSequence(tp0));
    assertEquals(OptionalLong.of(1000), transactionManager.lastAckedOffset(tp0));
    assertFalse(client.hasInFlightRequests());
    RecordMetadata unknownMetadata = request1.get();
    assertFalse(unknownMetadata.hasOffset());
    assertEquals(-1L, unknownMetadata.offset());
}
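The invariant the final assertions pin down is that the last acknowledged sequence only moves forward, even when an older sequence is acknowledged late with DUPLICATE_SEQUENCE_NUMBER. A minimal sketch of that invariant in isolation (the class and method names are illustrative, not the TransactionManager implementation):

import java.util.OptionalInt;

// Minimal sketch: the last acknowledged sequence number is monotonic, so a late or
// duplicate ack for an older sequence must not move it backwards.
final class LastAckedSequence {

    private OptionalInt lastAcked = OptionalInt.empty();

    void onAck(int sequence) {
        // Ignore acks that are not newer than what we already recorded.
        if (!lastAcked.isPresent() || sequence > lastAcked.getAsInt())
            lastAcked = OptionalInt.of(sequence);
    }

    OptionalInt lastAcked() {
        return lastAcked;
    }
}

Applied to the test: an ack for sequence 1 followed by the duplicate ack for sequence 0 leaves lastAcked() at OptionalInt.of(1), which is exactly what the test asserts after the DUPLICATE_SEQUENCE_NUMBER response.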