Use of org.apache.kafka.common.requests.RequestHeader in project kafka by apache.
From the class NetworkClientTest, method testAuthenticationFailureWithInFlightMetadataRequest.
@Test
public void testAuthenticationFailureWithInFlightMetadataRequest() {
    int refreshBackoffMs = 50;

    MetadataResponse metadataResponse = RequestTestUtils.metadataUpdateWith(2, Collections.emptyMap());
    Metadata metadata = new Metadata(refreshBackoffMs, 5000, new LogContext(), new ClusterResourceListeners());
    metadata.updateWithCurrentRequestVersion(metadataResponse, false, time.milliseconds());

    Cluster cluster = metadata.fetch();
    Node node1 = cluster.nodes().get(0);
    Node node2 = cluster.nodes().get(1);

    NetworkClient client = createNetworkClientWithNoVersionDiscovery(metadata);
    awaitReady(client, node1);

    // Expire the metadata and trigger a refresh; the metadata request goes to node1.
    metadata.requestUpdate();
    time.sleep(refreshBackoffMs);
    client.poll(0, time.milliseconds());
    Optional<Node> nodeWithPendingMetadataOpt = cluster.nodes().stream()
            .filter(node -> client.hasInFlightRequests(node.idString()))
            .findFirst();
    assertEquals(Optional.of(node1), nodeWithPendingMetadataOpt);

    // Fail authentication on node2 while the metadata request to node1 is still in flight.
    assertFalse(client.ready(node2, time.milliseconds()));
    selector.serverAuthenticationFailed(node2.idString());
    client.poll(0, time.milliseconds());
    assertNotNull(client.authenticationException(node2));

    // Deliver the metadata response from node1 and verify the update still completes.
    ByteBuffer requestBuffer = selector.completedSendBuffers().get(0).buffer();
    RequestHeader header = parseHeader(requestBuffer);
    assertEquals(ApiKeys.METADATA, header.apiKey());

    ByteBuffer responseBuffer = RequestTestUtils.serializeResponseWithHeader(metadataResponse, header.apiVersion(), header.correlationId());
    selector.delayedReceive(new DelayedReceive(node1.idString(), new NetworkReceive(node1.idString(), responseBuffer)));

    int initialUpdateVersion = metadata.updateVersion();
    client.poll(0, time.milliseconds());
    assertEquals(initialUpdateVersion + 1, metadata.updateVersion());
}
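The parseHeader helper used above is not shown in this snippet. A minimal sketch of what it plausibly does, assuming the completed send buffer is length-prefixed (the helper body is inferred, not copied from the test class):

// Assumed helper: a completed send buffer starts with a 4-byte size
// field, so skip it before handing the rest to RequestHeader.parse.
private RequestHeader parseHeader(ByteBuffer buffer) {
    buffer.getInt(); // skip the length prefix
    return RequestHeader.parse(buffer.slice());
}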
Use of org.apache.kafka.common.requests.RequestHeader in project kafka by apache.
From the class NetworkClient, method doSend.
private void doSend(ClientRequest clientRequest, boolean isInternalRequest, long now, AbstractRequest request) {
    String destination = clientRequest.destination();
    RequestHeader header = clientRequest.makeHeader(request.version());
    if (log.isDebugEnabled()) {
        log.debug("Sending {} request with header {} and timeout {} to node {}: {}",
                clientRequest.apiKey(), header, clientRequest.requestTimeoutMs(), destination, request);
    }
    Send send = request.toSend(header);
    InFlightRequest inFlightRequest = new InFlightRequest(clientRequest, header, isInternalRequest, request, send, now);
    this.inFlightRequests.add(inFlightRequest);
    selector.send(new NetworkSend(destination, send));
}
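clientRequest.makeHeader(version) is where the RequestHeader is actually assembled: it pairs the request's api key and version with the client id and a per-request correlation id. A minimal sketch of an equivalent construction using the public constructor, where "my-client" and 42 are illustrative placeholders:

// Illustrative only: roughly what makeHeader produces for a metadata
// request; the real client id and correlation id come from ClientRequest.
RequestHeader header = new RequestHeader(ApiKeys.METADATA,
        ApiKeys.METADATA.latestVersion(), "my-client", 42);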
Use of org.apache.kafka.common.requests.RequestHeader in project kafka by apache.
From the class Sender, method handleProduceResponse.
/**
* Handle a produce response
*/
private void handleProduceResponse(ClientResponse response, Map<TopicPartition, ProducerBatch> batches, long now) {
    RequestHeader requestHeader = response.requestHeader();
    int correlationId = requestHeader.correlationId();
    if (response.wasDisconnected()) {
        log.trace("Cancelled request with header {} due to node {} being disconnected", requestHeader, response.destination());
        for (ProducerBatch batch : batches.values())
            completeBatch(batch,
                    new ProduceResponse.PartitionResponse(Errors.NETWORK_EXCEPTION,
                            String.format("Disconnected from node %s", response.destination())),
                    correlationId, now);
    } else if (response.versionMismatch() != null) {
        log.warn("Cancelled request {} due to a version mismatch with node {}",
                response, response.destination(), response.versionMismatch());
        for (ProducerBatch batch : batches.values())
            completeBatch(batch, new ProduceResponse.PartitionResponse(Errors.UNSUPPORTED_VERSION), correlationId, now);
    } else {
        log.trace("Received produce response from node {} with correlation id {}", response.destination(), correlationId);
        // if we have a response, parse it
        if (response.hasResponse()) {
            // Sender should exercise PartitionProduceResponse rather than ProduceResponse.PartitionResponse
            // https://issues.apache.org/jira/browse/KAFKA-10696
            ProduceResponse produceResponse = (ProduceResponse) response.responseBody();
            produceResponse.data().responses().forEach(r -> r.partitionResponses().forEach(p -> {
                TopicPartition tp = new TopicPartition(r.name(), p.index());
                ProduceResponse.PartitionResponse partResp = new ProduceResponse.PartitionResponse(
                        Errors.forCode(p.errorCode()),
                        p.baseOffset(),
                        p.logAppendTimeMs(),
                        p.logStartOffset(),
                        p.recordErrors()
                                .stream()
                                .map(e -> new ProduceResponse.RecordError(e.batchIndex(), e.batchIndexErrorMessage()))
                                .collect(Collectors.toList()),
                        p.errorMessage());
                ProducerBatch batch = batches.get(tp);
                completeBatch(batch, partResp, correlationId, now);
            }));
            this.sensors.recordLatency(response.destination(), response.requestLatencyMs());
        } else {
            // this is the acks = 0 case, just complete all requests
            for (ProducerBatch batch : batches.values()) {
                completeBatch(batch, new ProduceResponse.PartitionResponse(Errors.NONE), correlationId, now);
            }
        }
    }
}
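For context, a simplified sketch of how the Sender wires this handler up when it dispatches a produce request (condensed from sendProduceRequest; nodeId, requestBuilder, recordsByPartition, acks, and requestTimeoutMs are assumed to be in scope as in that method):

// Simplified sketch: the completion handler captures the in-flight
// batches so handleProduceResponse can match responses by partition.
RequestCompletionHandler callback = response ->
        handleProduceResponse(response, recordsByPartition, time.milliseconds());
ClientRequest clientRequest = client.newClientRequest(nodeId, requestBuilder, now,
        acks != 0, requestTimeoutMs, callback);
client.send(clientRequest, now);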
Use of org.apache.kafka.common.requests.RequestHeader in project kafka by apache.
From the class SaslAuthenticatorTest, method testInvalidApiVersionsRequest.
/**
 * Tests that an invalid ApiVersionsRequest is handled correctly by the server and
 * returns an INVALID_REQUEST error.
 */
@Test
public void testInvalidApiVersionsRequest() throws Exception {
    short handshakeVersion = ApiKeys.SASL_HANDSHAKE.latestVersion();
    SecurityProtocol securityProtocol = SecurityProtocol.SASL_PLAINTEXT;
    configureMechanisms("PLAIN", Arrays.asList("PLAIN"));
    server = createEchoServer(securityProtocol);

    // Send an ApiVersionsRequest with invalid (blank) clientSoftwareName and
    // clientSoftwareVersion fields and validate the error response. Note that the
    // version itself is a supported one; the invalid part is the blank fields.
    String node = "1";
    short version = ApiKeys.API_VERSIONS.latestVersion();
    createClientConnection(SecurityProtocol.PLAINTEXT, node);
    RequestHeader header = new RequestHeader(ApiKeys.API_VERSIONS, version, "someclient", 1);
    ApiVersionsRequest request = new ApiVersionsRequest(
            new ApiVersionsRequestData().setClientSoftwareName(" ").setClientSoftwareVersion(" "),
            version);
    selector.send(new NetworkSend(node, request.toSend(header)));
    ByteBuffer responseBuffer = waitForResponse();
    ResponseHeader.parse(responseBuffer, ApiKeys.API_VERSIONS.responseHeaderVersion(version));
    ApiVersionsResponse response = ApiVersionsResponse.parse(responseBuffer, version);
    assertEquals(Errors.INVALID_REQUEST.code(), response.data().errorCode());

    // Send a well-formed ApiVersionsRequest. This should succeed.
    sendVersionRequestReceiveResponse(node);

    // Test that the client can authenticate successfully.
    sendHandshakeRequestReceiveResponse(node, handshakeVersion);
    authenticateUsingSaslPlainAndCheckConnection(node, handshakeVersion > 0);
}
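For contrast, a minimal sketch of a well-formed request at the same version; the software name and version strings below are illustrative values chosen to satisfy the broker's validation of those fields:

// Illustrative only: non-blank clientSoftwareName/clientSoftwareVersion
// pass validation, so this request would not return INVALID_REQUEST.
ApiVersionsRequest validRequest = new ApiVersionsRequest(
        new ApiVersionsRequestData()
                .setClientSoftwareName("apache-kafka-java")
                .setClientSoftwareVersion("3.0.0"),
        version);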
Use of org.apache.kafka.common.requests.RequestHeader in project kafka by apache.
From the class SaslAuthenticatorTest, method testConvertListOffsetResponseToSaslHandshakeResponse.
@Test
public void testConvertListOffsetResponseToSaslHandshakeResponse() {
    ListOffsetsResponseData data = new ListOffsetsResponseData()
            .setThrottleTimeMs(0)
            .setTopics(Collections.singletonList(new ListOffsetsTopicResponse()
                    .setName("topic")
                    .setPartitions(Collections.singletonList(new ListOffsetsPartitionResponse()
                            .setErrorCode(Errors.NONE.code())
                            .setLeaderEpoch(ListOffsetsResponse.UNKNOWN_EPOCH)
                            .setPartitionIndex(0)
                            .setOffset(0)
                            .setTimestamp(0)))));
    ListOffsetsResponse response = new ListOffsetsResponse(data);
    ByteBuffer buffer = RequestTestUtils.serializeResponseWithHeader(response, LIST_OFFSETS.latestVersion(), 0);

    // A correlation id in the range reserved for SASL authentication: the mismatch
    // with the serialized correlation id (0) is reported as a SchemaException.
    final RequestHeader header0 = new RequestHeader(LIST_OFFSETS, LIST_OFFSETS.latestVersion(),
            "id", SaslClientAuthenticator.MIN_RESERVED_CORRELATION_ID);
    assertThrows(SchemaException.class, () -> NetworkClient.parseResponse(buffer.duplicate(), header0));

    // An ordinary correlation id (1) that does not match the serialized id (0)
    // fails the correlation check with an IllegalStateException.
    final RequestHeader header1 = new RequestHeader(LIST_OFFSETS, LIST_OFFSETS.latestVersion(), "id", 1);
    assertThrows(IllegalStateException.class, () -> NetworkClient.parseResponse(buffer.duplicate(), header1));
}
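A condensed sketch of the correlation check behind the IllegalStateException above, assuming requestHeader and responseHeader have already been parsed as in NetworkClient.parseResponse (the real code also special-cases the SASL-reserved correlation id range, which is what produces the SchemaException instead):

// Condensed sketch, not the verbatim implementation.
if (requestHeader.correlationId() != responseHeader.correlationId()) {
    throw new IllegalStateException("Correlation id for response ("
            + responseHeader.correlationId() + ") does not match request ("
            + requestHeader.correlationId() + "), request header: " + requestHeader);
}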