Use of org.apache.kafka.clients.ClientRequest in project kafka by apache: the class ConsumerNetworkClient, method send().
/**
* Send a new request. Note that the request is not actually transmitted on the
* network until one of the {@link #poll(long)} variants is invoked. At this
* point the request will either be transmitted successfully or will fail.
* Use the returned future to obtain the result of the send. Note that there is no
* need to check for disconnects explicitly on the {@link ClientResponse} object;
* instead, the future will be failed with a {@link DisconnectException}.
*
* @param node The destination of the request
* @param requestBuilder A builder for the request payload
* @return A future which indicates the result of the send.
*/
public RequestFuture<ClientResponse> send(Node node, AbstractRequest.Builder<?> requestBuilder) {
    long now = time.milliseconds();
    RequestFutureCompletionHandler completionHandler = new RequestFutureCompletionHandler();
    ClientRequest clientRequest = client.newClientRequest(node.idString(), requestBuilder, now, true, completionHandler);
    unsent.put(node, clientRequest);
    // wake up the client in case it is blocking in poll so that we can send the queued request
    client.wakeup();
    return completionHandler.future;
}
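The javadoc above is the contract; here is a minimal usage sketch, not taken from the Kafka sources. The sendAndAwait helper name, the node argument, and the ApiVersionsRequest payload are illustrative assumptions; the pattern of pairing send() with poll(future) and reading the result from the RequestFuture is what the contract describes.

import org.apache.kafka.clients.ClientResponse;
import org.apache.kafka.clients.consumer.internals.ConsumerNetworkClient;
import org.apache.kafka.clients.consumer.internals.RequestFuture;
import org.apache.kafka.common.Node;
import org.apache.kafka.common.requests.ApiVersionsRequest;

// Hypothetical helper: queue a request, drive I/O until it completes, and
// surface the result. A disconnect fails the future with DisconnectException
// instead of appearing on the ClientResponse, exactly as the javadoc notes.
static ClientResponse sendAndAwait(ConsumerNetworkClient networkClient, Node node) {
    RequestFuture<ClientResponse> future = networkClient.send(node, new ApiVersionsRequest.Builder());
    // Nothing has hit the wire yet; poll(future) blocks until the future completes.
    networkClient.poll(future);
    if (future.succeeded())
        return future.value();
    // exception() is a RuntimeException (e.g. DisconnectException on a dropped connection).
    throw future.exception();
}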
Use of org.apache.kafka.clients.ClientRequest in project kafka by apache: the class StreamsKafkaClient, method checkBrokerCompatibility().
/**
 * Check if the used brokers have version 0.10.1.x or higher.
 * <p>
 * Note that for <em>pre</em>-0.10.x brokers the broker version cannot be checked, and the client will hang and retry
 * until it {@link StreamsConfig#REQUEST_TIMEOUT_MS_CONFIG times out}.
 *
 * @throws StreamsException if brokers have version 0.10.0.x
 */
public void checkBrokerCompatibility() throws StreamsException {
    final ClientRequest clientRequest = kafkaClient.newClientRequest(getAnyReadyBrokerId(),
        new ApiVersionsRequest.Builder(), Time.SYSTEM.milliseconds(), true);
    final ClientResponse clientResponse = sendRequest(clientRequest);
    if (!clientResponse.hasResponse()) {
        throw new StreamsException("Empty response for client request.");
    }
    if (!(clientResponse.responseBody() instanceof ApiVersionsResponse)) {
        throw new StreamsException("Inconsistent response type for API versions request. Expected ApiVersionsResponse but received " + clientResponse.responseBody().getClass().getName());
    }
    final ApiVersionsResponse apiVersionsResponse = (ApiVersionsResponse) clientResponse.responseBody();
    if (apiVersionsResponse.apiVersion(ApiKeys.CREATE_TOPICS.id) == null) {
        throw new StreamsException("Kafka Streams requires broker version 0.10.1.x or higher.");
    }
}
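The probe works because a broker only advertises APIs it implements: CREATE_TOPICS first shipped in 0.10.1.0, so its absence identifies an older broker. A hedged sketch of that core check, isolated from the request plumbing (the method name and the response parameter are assumptions; the apiVersion(...) accessor is the one used above):

import org.apache.kafka.common.protocol.ApiKeys;
import org.apache.kafka.common.requests.ApiVersionsResponse;

// Hypothetical extraction of the compatibility test above; apiVersion(...)
// returns null when the broker does not implement the given API.
static boolean brokerIsCompatible(final ApiVersionsResponse response) {
    // CREATE_TOPICS was introduced in 0.10.1.0, which is exactly the floor
    // checkBrokerCompatibility() enforces.
    return response.apiVersion(ApiKeys.CREATE_TOPICS.id) != null;
}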
Use of org.apache.kafka.clients.ClientRequest in project apache-kafka-on-k8s by banzaicloud: the class SenderTest, method testCorrectHandlingOfOutOfOrderResponsesWhenSecondSucceeds().
@Test
public void testCorrectHandlingOfOutOfOrderResponsesWhenSecondSucceeds() throws Exception {
    final long producerId = 343434L;
    TransactionManager transactionManager = new TransactionManager();
    setupWithTransactionState(transactionManager);
    prepareAndReceiveInitProducerId(producerId, Errors.NONE);
    assertTrue(transactionManager.hasProducerId());
    assertEquals(0, transactionManager.sequenceNumber(tp0).longValue());
    // Send the first ProduceRequest
    Future<RecordMetadata> request1 = accumulator.append(tp0, time.milliseconds(), "key".getBytes(), "value".getBytes(), null, null, MAX_BLOCK_TIMEOUT).future;
    sender.run(time.milliseconds());
    String nodeId = client.requests().peek().destination();
    Node node = new Node(Integer.valueOf(nodeId), "localhost", 0);
    assertEquals(1, client.inFlightRequestCount());
    // Send the second ProduceRequest
    Future<RecordMetadata> request2 = accumulator.append(tp0, time.milliseconds(), "key".getBytes(), "value".getBytes(), null, null, MAX_BLOCK_TIMEOUT).future;
    sender.run(time.milliseconds());
    assertEquals(2, client.inFlightRequestCount());
    assertFalse(request1.isDone());
    assertFalse(request2.isDone());
    assertTrue(client.isReady(node, time.milliseconds()));
    ClientRequest firstClientRequest = client.requests().peek();
    ClientRequest secondClientRequest = (ClientRequest) client.requests().toArray()[1];
    client.respondToRequest(secondClientRequest, produceResponse(tp0, 1, Errors.NONE, 1));
    // receive the response to the second request (sequence 1)
    sender.run(time.milliseconds());
    assertTrue(request2.isDone());
    assertEquals(1, request2.get().offset());
    assertFalse(request1.isDone());
    Deque<ProducerBatch> queuedBatches = accumulator.batches().get(tp0);
    assertEquals(0, queuedBatches.size());
    assertEquals(1, client.inFlightRequestCount());
    assertEquals(1, transactionManager.lastAckedSequence(tp0));
    client.respondToRequest(firstClientRequest, produceResponse(tp0, -1, Errors.REQUEST_TIMED_OUT, -1));
    // receive the response to the first request (sequence 0)
    sender.run(time.milliseconds());
    // Make sure the timed-out first batch was requeued.
    assertEquals(1, queuedBatches.size());
    assertEquals(0, queuedBatches.peekFirst().baseSequence());
    assertEquals(1, transactionManager.lastAckedSequence(tp0));
    assertEquals(0, client.inFlightRequestCount());
    // resend request 0
    sender.run(time.milliseconds());
    assertEquals(1, client.inFlightRequestCount());
    assertEquals(1, transactionManager.lastAckedSequence(tp0));
    // Make sure we handle the out-of-order successful responses correctly.
    sendIdempotentProducerResponse(0, tp0, Errors.NONE, 0L);
    // receive the response to the retried request (sequence 0)
    sender.run(time.milliseconds());
    assertEquals(0, queuedBatches.size());
    assertEquals(1, transactionManager.lastAckedSequence(tp0));
    assertEquals(0, client.inFlightRequestCount());
    assertFalse(client.hasInFlightRequests());
    assertTrue(request1.isDone());
    assertEquals(0, request1.get().offset());
}
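Both SenderTest snippets reply through a private produceResponse(...) helper that the excerpt does not show. A hedged reconstruction follows; the PartitionResponse constructor arity varies across client versions, so the four-argument form here is an assumption based on 1.x-era clients, with the signature inferred from the call sites.

import java.util.Collections;
import java.util.Map;
import org.apache.kafka.common.TopicPartition;
import org.apache.kafka.common.protocol.Errors;
import org.apache.kafka.common.record.RecordBatch;
import org.apache.kafka.common.requests.ProduceResponse;

// Sketch of the private helper the tests call; signature inferred from the call sites.
private ProduceResponse produceResponse(TopicPartition tp, long offset, Errors error, int throttleTimeMs) {
    // NO_TIMESTAMP for the log-append time and -1 for the log start offset mark them unknown.
    ProduceResponse.PartitionResponse resp =
        new ProduceResponse.PartitionResponse(error, offset, RecordBatch.NO_TIMESTAMP, -1L);
    Map<TopicPartition, ProduceResponse.PartitionResponse> partResponses = Collections.singletonMap(tp, resp);
    return new ProduceResponse(partResponses, throttleTimeMs);
}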
Use of org.apache.kafka.clients.ClientRequest in project apache-kafka-on-k8s by banzaicloud: the class SenderTest, method testCorrectHandlingOfOutOfOrderResponses().
@Test
public void testCorrectHandlingOfOutOfOrderResponses() throws Exception {
    final long producerId = 343434L;
    TransactionManager transactionManager = new TransactionManager();
    setupWithTransactionState(transactionManager);
    prepareAndReceiveInitProducerId(producerId, Errors.NONE);
    assertTrue(transactionManager.hasProducerId());
    assertEquals(0, transactionManager.sequenceNumber(tp0).longValue());
    // Send the first ProduceRequest
    Future<RecordMetadata> request1 = accumulator.append(tp0, time.milliseconds(), "key".getBytes(), "value".getBytes(), null, null, MAX_BLOCK_TIMEOUT).future;
    sender.run(time.milliseconds());
    String nodeId = client.requests().peek().destination();
    Node node = new Node(Integer.valueOf(nodeId), "localhost", 0);
    assertEquals(1, client.inFlightRequestCount());
    assertEquals(1, transactionManager.sequenceNumber(tp0).longValue());
    assertEquals(-1, transactionManager.lastAckedSequence(tp0));
    // Send the second ProduceRequest
    Future<RecordMetadata> request2 = accumulator.append(tp0, time.milliseconds(), "key".getBytes(), "value".getBytes(), null, null, MAX_BLOCK_TIMEOUT).future;
    sender.run(time.milliseconds());
    assertEquals(2, client.inFlightRequestCount());
    assertEquals(2, transactionManager.sequenceNumber(tp0).longValue());
    assertEquals(-1, transactionManager.lastAckedSequence(tp0));
    assertFalse(request1.isDone());
    assertFalse(request2.isDone());
    assertTrue(client.isReady(node, time.milliseconds()));
    ClientRequest firstClientRequest = client.requests().peek();
    ClientRequest secondClientRequest = (ClientRequest) client.requests().toArray()[1];
    client.respondToRequest(secondClientRequest, produceResponse(tp0, -1, Errors.OUT_OF_ORDER_SEQUENCE_NUMBER, -1));
    // receive the response to the second request (sequence 1)
    sender.run(time.milliseconds());
    Deque<ProducerBatch> queuedBatches = accumulator.batches().get(tp0);
    // Make sure that we are queueing the second batch first.
    assertEquals(1, queuedBatches.size());
    assertEquals(1, queuedBatches.peekFirst().baseSequence());
    assertEquals(1, client.inFlightRequestCount());
    assertEquals(-1, transactionManager.lastAckedSequence(tp0));
    client.respondToRequest(firstClientRequest, produceResponse(tp0, -1, Errors.NOT_LEADER_FOR_PARTITION, -1));
    // receive the response to the first request (sequence 0)
    sender.run(time.milliseconds());
    // Make sure we requeued both batches in the correct order.
    assertEquals(2, queuedBatches.size());
    assertEquals(0, queuedBatches.peekFirst().baseSequence());
    assertEquals(1, queuedBatches.peekLast().baseSequence());
    assertEquals(-1, transactionManager.lastAckedSequence(tp0));
    assertEquals(0, client.inFlightRequestCount());
    assertFalse(request1.isDone());
    assertFalse(request2.isDone());
    // send request 0
    sender.run(time.milliseconds());
    assertEquals(1, client.inFlightRequestCount());
    // Do nothing: only one in-flight request is allowed while retrying.
    sender.run(time.milliseconds());
    assertEquals(1, client.inFlightRequestCount());
    assertEquals(-1, transactionManager.lastAckedSequence(tp0));
    // Make sure the requests are sent in order, even though the previous responses arrived out of order.
    sendIdempotentProducerResponse(0, tp0, Errors.NONE, 0L);
    // receive response 0
    sender.run(time.milliseconds());
    assertEquals(0, transactionManager.lastAckedSequence(tp0));
    assertEquals(0, client.inFlightRequestCount());
    assertTrue(request1.isDone());
    assertEquals(0, request1.get().offset());
    // send request 1
    sender.run(time.milliseconds());
    assertEquals(1, client.inFlightRequestCount());
    sendIdempotentProducerResponse(1, tp0, Errors.NONE, 1L);
    // receive response 1
    sender.run(time.milliseconds());
    assertFalse(client.hasInFlightRequests());
    assertEquals(1, transactionManager.lastAckedSequence(tp0));
    assertTrue(request2.isDone());
    assertEquals(1, request2.get().offset());
}
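The other unseen helper is sendIdempotentProducerResponse(...), which both tests use to answer the retried requests. A hedged sketch: it registers a MockClient matcher so the response is delivered only to a ProduceRequest whose first batch carries the expected sequence number. The partitionRecordsOrFail() accessor and the batch inspection are 1.x-era assumptions, not the verbatim SenderTest code.

import java.util.Iterator;
import org.apache.kafka.clients.MockClient;
import org.apache.kafka.common.TopicPartition;
import org.apache.kafka.common.protocol.Errors;
import org.apache.kafka.common.record.MemoryRecords;
import org.apache.kafka.common.record.MutableRecordBatch;
import org.apache.kafka.common.requests.AbstractRequest;
import org.apache.kafka.common.requests.ProduceRequest;

// Sketch: respond only once the next in-flight ProduceRequest carries the expected sequence.
private void sendIdempotentProducerResponse(final int expectedSequence, final TopicPartition tp,
                                            final Errors error, final long offset) {
    client.respond(new MockClient.RequestMatcher() {
        @Override
        public boolean matches(AbstractRequest body) {
            ProduceRequest produceRequest = (ProduceRequest) body;
            // Assumed accessor: pull the records for the partition and check the base sequence.
            MemoryRecords records = produceRequest.partitionRecordsOrFail().get(tp);
            Iterator<MutableRecordBatch> batches = records.batches().iterator();
            return batches.hasNext() && batches.next().baseSequence() == expectedSequence;
        }
    }, produceResponse(tp, offset, error, 0));
}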
Use of org.apache.kafka.clients.ClientRequest in project apache-kafka-on-k8s by banzaicloud: the class KafkaConsumerTest, method testSubscriptionChangesWithAutoCommitDisabled().
/**
 * Verify that when a consumer changes its topic subscription, its assigned partitions
 * do not immediately change, and the consumed offsets of the to-be-revoked partitions
 * are not committed (when auto-commit is disabled).
 * Upon unsubscribing, the assigned partitions change immediately, but if auto-commit
 * is disabled the consumer offsets are still not committed.
 */
@Test
public void testSubscriptionChangesWithAutoCommitDisabled() {
    Time time = new MockTime();
    Map<String, Integer> tpCounts = new HashMap<>();
    tpCounts.put(topic, 1);
    tpCounts.put(topic2, 1);
    Cluster cluster = TestUtils.singletonCluster(tpCounts);
    Node node = cluster.nodes().get(0);
    Metadata metadata = createMetadata();
    metadata.update(cluster, Collections.<String>emptySet(), time.milliseconds());
    MockClient client = new MockClient(time, metadata);
    client.setNode(node);
    PartitionAssignor assignor = new RangeAssignor();
    KafkaConsumer<String, String> consumer = newConsumer(time, client, metadata, assignor, false);
    // initial subscription
    consumer.subscribe(singleton(topic), getConsumerRebalanceListener(consumer));
    // verify that the subscription has changed but the assignment is still unchanged
    assertTrue(consumer.subscription().equals(singleton(topic)));
    assertTrue(consumer.assignment().isEmpty());
    // mock rebalance responses
    prepareRebalance(client, node, assignor, singletonList(tp0), null);
    consumer.poll(0);
    // verify that the subscription is still the same, and the assignment has now caught up
    assertTrue(consumer.subscription().equals(singleton(topic)));
    assertTrue(consumer.assignment().equals(singleton(tp0)));
    consumer.poll(0);
    // subscription change
    consumer.subscribe(singleton(topic2), getConsumerRebalanceListener(consumer));
    // verify that the subscription has changed but the assignment is still unchanged
    assertTrue(consumer.subscription().equals(singleton(topic2)));
    assertTrue(consumer.assignment().equals(singleton(tp0)));
    // auto-commit is disabled, so no offset commit request should have been sent
    for (ClientRequest req : client.requests())
        assertTrue(req.requestBuilder().apiKey() != ApiKeys.OFFSET_COMMIT);
    // unsubscribe
    consumer.unsubscribe();
    // verify that subscription and assignment are both updated
    assertTrue(consumer.subscription().isEmpty());
    assertTrue(consumer.assignment().isEmpty());
    // auto-commit is disabled, so no offset commit request should have been sent
    for (ClientRequest req : client.requests())
        assertTrue(req.requestBuilder().apiKey() != ApiKeys.OFFSET_COMMIT);
    client.requests().clear();
    consumer.close();
}
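The two identical loops over client.requests() could be factored into a small helper; the following is a hypothetical refactoring, not code from KafkaConsumerTest.

import org.apache.kafka.clients.ClientRequest;
import org.apache.kafka.clients.MockClient;
import org.apache.kafka.common.protocol.ApiKeys;
import static org.junit.Assert.assertNotEquals;

// Hypothetical helper: with auto-commit disabled, no OFFSET_COMMIT request may be queued.
private static void assertNoOffsetCommits(MockClient client) {
    for (ClientRequest request : client.requests())
        assertNotEquals(ApiKeys.OFFSET_COMMIT, request.requestBuilder().apiKey());
}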