Use of org.apache.kafka.clients.ClientResponse in project kafka by apache.
The class ConsumerNetworkClientTest, method send.
@Test
public void send() {
    client.prepareResponse(heartbeatResponse(Errors.NONE));
    RequestFuture<ClientResponse> future = consumerClient.send(node, heartbeat());
    assertEquals(1, consumerClient.pendingRequestCount());
    assertEquals(1, consumerClient.pendingRequestCount(node));
    assertFalse(future.isDone());

    consumerClient.poll(future);
    assertTrue(future.isDone());
    assertTrue(future.succeeded());

    ClientResponse clientResponse = future.value();
    HeartbeatResponse response = (HeartbeatResponse) clientResponse.responseBody();
    assertEquals(Errors.NONE, response.error());
}
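The test completes the future by blocking in poll(future). The same RequestFuture can also be consumed asynchronously with a listener, which is exactly the style Fetcher.sendFetches() uses further down; a minimal sketch of that non-blocking variant for the heartbeat case (error handling elided):

// Non-blocking variant (sketch): register a listener instead of blocking in poll(future).
consumerClient.send(node, heartbeat()).addListener(new RequestFutureListener<ClientResponse>() {
    @Override
    public void onSuccess(ClientResponse resp) {
        HeartbeatResponse response = (HeartbeatResponse) resp.responseBody();
        // inspect response.error() here
    }

    @Override
    public void onFailure(RuntimeException e) {
        // the request failed, expired unsent, or the connection was lost
    }
});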
Use of org.apache.kafka.clients.ClientResponse in project kafka by apache.
The class StreamsKafkaClient, method checkBrokerCompatibility.
/**
 * Check if the used brokers have version 0.10.1.x or higher.
 * <p>
 * Note: for <em>pre</em> 0.10.x brokers the broker version cannot be checked and the client will hang and retry
 * until it {@link StreamsConfig#REQUEST_TIMEOUT_MS_CONFIG times out}.
 *
 * @throws StreamsException if brokers have version 0.10.0.x
 */
public void checkBrokerCompatibility() throws StreamsException {
    final ClientRequest clientRequest = kafkaClient.newClientRequest(
        getAnyReadyBrokerId(),
        new ApiVersionsRequest.Builder(),
        Time.SYSTEM.milliseconds(),
        true);

    final ClientResponse clientResponse = sendRequest(clientRequest);
    if (!clientResponse.hasResponse()) {
        throw new StreamsException("Empty response for client request.");
    }
    if (!(clientResponse.responseBody() instanceof ApiVersionsResponse)) {
        throw new StreamsException("Inconsistent response type for API versions request. " +
            "Expected ApiVersionsResponse but received " + clientResponse.responseBody().getClass().getName());
    }
    final ApiVersionsResponse apiVersionsResponse = (ApiVersionsResponse) clientResponse.responseBody();

    if (apiVersionsResponse.apiVersion(ApiKeys.CREATE_TOPICS.id) == null) {
        throw new StreamsException("Kafka Streams requires broker version 0.10.1.x or higher.");
    }
}
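The probe works because ApiVersionsResponse.apiVersion(short) returns null for APIs the broker does not implement, and CREATE_TOPICS first shipped in 0.10.1.0, so its absence identifies an older broker. A minimal sketch generalizing the same null-check to an arbitrary API key (the helper name brokerSupports is hypothetical, not part of StreamsKafkaClient):

// Hypothetical helper: the same null-check as above, for any API key.
private static boolean brokerSupports(final ApiVersionsResponse response, final ApiKeys api) {
    return response.apiVersion(api.id) != null;
}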
Use of org.apache.kafka.clients.ClientResponse in project kafka by apache.
The class ConsumerNetworkClient, method checkDisconnects.
private void checkDisconnects(long now) {
    // any disconnects affecting requests that have already been transmitted will be handled
    // by NetworkClient, so we just need to check whether connections for any of the unsent
    // requests have been disconnected; if they have, then we complete the corresponding future
    // and set the disconnect flag in the ClientResponse
    for (Node node : unsent.nodes()) {
        if (client.connectionFailed(node)) {
            // Remove entry before invoking request callback to avoid callbacks handling
            // coordinator failures traversing the unsent list again.
            Collection<ClientRequest> requests = unsent.remove(node);
            for (ClientRequest request : requests) {
                RequestFutureCompletionHandler handler = (RequestFutureCompletionHandler) request.callback();
                handler.onComplete(new ClientResponse(
                    request.makeHeader(request.requestBuilder().desiredOrLatestVersion()),
                    request.callback(),
                    request.destination(),
                    request.createdTimeMs(),
                    now,
                    true,   // disconnected
                    null,   // no version mismatch
                    null)); // no response body
            }
        }
    }
}
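On the consuming side, this synthetic response is distinguishable from a real one through the disconnect flag. A minimal sketch of a callback that checks it (the class is hypothetical; ConsumerNetworkClient's real RequestFutureCompletionHandler turns the flag into a failed RequestFuture):

// Hypothetical callback: ClientResponse.wasDisconnected() exposes the boolean
// passed as the sixth constructor argument in checkDisconnects() above.
class DisconnectAwareHandler implements RequestCompletionHandler {
    @Override
    public void onComplete(ClientResponse response) {
        if (response.wasDisconnected())
            System.out.println("Connection to " + response.destination() + " failed before a response arrived");
        else
            System.out.println("Response received from " + response.destination());
    }
}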
Use of org.apache.kafka.clients.ClientResponse in project kafka by apache.
The class ConsumerNetworkClientTest, method sendExpiry.
@Test
public void sendExpiry() throws InterruptedException {
    long unsentExpiryMs = 10;
    final AtomicBoolean isReady = new AtomicBoolean();
    final AtomicBoolean disconnected = new AtomicBoolean();
    client = new MockClient(time) {

        @Override
        public boolean ready(Node node, long now) {
            if (isReady.get())
                return super.ready(node, now);
            else
                return false;
        }

        @Override
        public boolean connectionFailed(Node node) {
            return disconnected.get();
        }
    };

    // Queue first send, sleep long enough for this to expire and then queue second send
    consumerClient = new ConsumerNetworkClient(client, metadata, time, 100, unsentExpiryMs);
    RequestFuture<ClientResponse> future1 = consumerClient.send(node, heartbeat());
    assertEquals(1, consumerClient.pendingRequestCount());
    assertEquals(1, consumerClient.pendingRequestCount(node));
    assertFalse(future1.isDone());

    time.sleep(unsentExpiryMs + 1);
    RequestFuture<ClientResponse> future2 = consumerClient.send(node, heartbeat());
    assertEquals(2, consumerClient.pendingRequestCount());
    assertEquals(2, consumerClient.pendingRequestCount(node));
    assertFalse(future2.isDone());

    // First send should have expired and second send still pending
    consumerClient.poll(0);
    assertTrue(future1.isDone());
    assertFalse(future1.succeeded());
    assertEquals(1, consumerClient.pendingRequestCount());
    assertEquals(1, consumerClient.pendingRequestCount(node));
    assertFalse(future2.isDone());

    // Enable send, the un-expired send should succeed on poll
    isReady.set(true);
    client.prepareResponse(heartbeatResponse(Errors.NONE));
    consumerClient.poll(future2);
    ClientResponse clientResponse = future2.value();
    HeartbeatResponse response = (HeartbeatResponse) clientResponse.responseBody();
    assertEquals(Errors.NONE, response.error());

    // Disable ready flag to delay send and queue another send. Disconnection should remove pending send
    isReady.set(false);
    RequestFuture<ClientResponse> future3 = consumerClient.send(node, heartbeat());
    assertEquals(1, consumerClient.pendingRequestCount());
    assertEquals(1, consumerClient.pendingRequestCount(node));
    disconnected.set(true);
    consumerClient.poll(0);
    assertTrue(future3.isDone());
    assertFalse(future3.succeeded());
    assertEquals(0, consumerClient.pendingRequestCount());
    assertEquals(0, consumerClient.pendingRequestCount(node));
}
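A failed RequestFuture also carries the exception that failed it; a minimal sketch of additional assertions the test could make (the concrete exception types are implementation details of ConsumerNetworkClient, so only their presence is checked here):

// Sketch: RequestFuture.failed() and exception() expose the failure cause.
// future1 failed because it expired unsent; future3 because the node disconnected.
assertTrue(future1.failed());
assertNotNull(future1.exception());
assertTrue(future3.failed());
assertNotNull(future3.exception());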
Use of org.apache.kafka.clients.ClientResponse in project kafka by apache.
The class Fetcher, method sendFetches.
/**
 * Set up a fetch request for any node to which we have assigned partitions and which does not
 * already have an in-flight fetch or pending fetch data.
 * @return number of fetches sent
 */
public int sendFetches() {
    Map<Node, FetchRequest.Builder> fetchRequestMap = createFetchRequests();
    for (Map.Entry<Node, FetchRequest.Builder> fetchEntry : fetchRequestMap.entrySet()) {
        final FetchRequest.Builder request = fetchEntry.getValue();
        final Node fetchTarget = fetchEntry.getKey();

        log.debug("Sending fetch for partitions {} to broker {}", request.fetchData().keySet(), fetchTarget);
        client.send(fetchTarget, request).addListener(new RequestFutureListener<ClientResponse>() {

            @Override
            public void onSuccess(ClientResponse resp) {
                FetchResponse response = (FetchResponse) resp.responseBody();
                if (!matchesRequestedPartitions(request, response)) {
                    // obviously we expect the broker to always send us valid responses, so this check
                    // is mainly for test cases where mock fetch responses must be manually crafted.
                    log.warn("Ignoring fetch response containing partitions {} since it does not match " +
                        "the requested partitions {}", response.responseData().keySet(), request.fetchData().keySet());
                    return;
                }

                Set<TopicPartition> partitions = new HashSet<>(response.responseData().keySet());
                FetchResponseMetricAggregator metricAggregator = new FetchResponseMetricAggregator(sensors, partitions);

                for (Map.Entry<TopicPartition, FetchResponse.PartitionData> entry : response.responseData().entrySet()) {
                    TopicPartition partition = entry.getKey();
                    long fetchOffset = request.fetchData().get(partition).offset;
                    FetchResponse.PartitionData fetchData = entry.getValue();
                    completedFetches.add(new CompletedFetch(partition, fetchOffset, fetchData,
                        metricAggregator, resp.requestHeader().apiVersion()));
                }

                sensors.fetchLatency.record(resp.requestLatencyMs());
                sensors.fetchThrottleTimeSensor.record(response.throttleTimeMs());
            }

            @Override
            public void onFailure(RuntimeException e) {
                log.debug("Fetch request to {} for partitions {} failed", fetchTarget, request.fetchData().keySet(), e);
            }
        });
    }
    return fetchRequestMap.size();
}
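Note that sendFetches() only queues requests and registers listeners; nothing completes until the underlying client is polled. A minimal sketch of the driving loop, loosely modeled on how KafkaConsumer uses the Fetcher (coordinator work and timeout handling omitted; pollTimeoutMs is a placeholder):

// Sketch of the driving loop (simplified): queue fetch requests, poll so responses
// arrive and the listeners above run, then drain the completed fetches.
fetcher.sendFetches();
consumerClient.poll(pollTimeoutMs);
Map<TopicPartition, List<ConsumerRecord<K, V>>> records = fetcher.fetchedRecords();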