use of org.apache.kafka.common.protocol.types.SchemaException in project kafka by apache.
the class SaslClientAuthenticator method handleKafkaResponse.
private void handleKafkaResponse(RequestHeader requestHeader, byte[] responseBytes) {
    AbstractResponse response;
    ApiKeys apiKey;
    try {
        response = NetworkClient.parseResponse(ByteBuffer.wrap(responseBytes), requestHeader);
        apiKey = ApiKeys.forId(requestHeader.apiKey());
    } catch (SchemaException | IllegalArgumentException e) {
        LOG.debug("Invalid SASL mechanism response, server may be expecting only GSSAPI tokens");
        throw new AuthenticationException("Invalid SASL mechanism response", e);
    }
    switch (apiKey) {
        case SASL_HANDSHAKE:
            handleSaslHandshakeResponse((SaslHandshakeResponse) response);
            break;
        default:
            throw new IllegalStateException("Unexpected API key during handshake: " + apiKey);
    }
}
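The catch clause is the interesting part: a broker that only supports GSSAPI answers the handshake with a raw Kerberos token rather than a Kafka-framed response, so parsing the bytes fails with a SchemaException (schema mismatch) or an IllegalArgumentException (unknown API key). A minimal standalone sketch of the same parse-or-fail pattern; the class and helper names here are hypothetical, not part of Kafka:

import java.nio.ByteBuffer;

import org.apache.kafka.clients.NetworkClient;
import org.apache.kafka.common.errors.AuthenticationException;
import org.apache.kafka.common.protocol.types.SchemaException;
import org.apache.kafka.common.requests.AbstractResponse;
import org.apache.kafka.common.requests.RequestHeader;

final class SaslParseExample {

    // Hypothetical helper: parse the response, mapping any schema mismatch to an
    // authentication failure so the connection fails cleanly instead of crashing the client.
    static AbstractResponse parseOrFailAuthentication(RequestHeader header, byte[] responseBytes) {
        try {
            return NetworkClient.parseResponse(ByteBuffer.wrap(responseBytes), header);
        } catch (SchemaException | IllegalArgumentException e) {
            // The bytes were likely a raw SASL/GSSAPI token, not a Kafka response frame.
            throw new AuthenticationException("Invalid SASL mechanism response", e);
        }
    }
}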
use of org.apache.kafka.common.protocol.types.SchemaException in project apache-kafka-on-k8s by banzaicloud.
the class SaslClientAuthenticator method receiveKafkaResponse.
private AbstractResponse receiveKafkaResponse() throws IOException {
    try {
        byte[] responseBytes = receiveResponseOrToken();
        if (responseBytes == null)
            return null;
        else {
            AbstractResponse response = NetworkClient.parseResponse(ByteBuffer.wrap(responseBytes), currentRequestHeader);
            currentRequestHeader = null;
            return response;
        }
    } catch (SchemaException | IllegalArgumentException e) {
        LOG.debug("Invalid SASL mechanism response, server may be expecting only GSSAPI tokens");
        setSaslState(SaslState.FAILED);
        throw new IllegalSaslStateException("Invalid SASL mechanism response, server may be expecting a different protocol", e);
    }
}
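Note the null return: receiveResponseOrToken() yields null while the nonblocking read is still assembling a frame, and the caller simply retries on the next selector wakeup. A self-contained sketch of that contract, with a stub standing in for receiveResponseOrToken() (everything here is illustrative, not Kafka code):

import java.io.IOException;

final class NonblockingReadDemo {
    private int polls = 0;

    // Stub: pretend the complete frame arrives on the third poll.
    private byte[] receiveResponseOrToken() throws IOException {
        return ++polls < 3 ? null : new byte[] {42};
    }

    byte[] pollUntilComplete() throws IOException {
        byte[] frame;
        while ((frame = receiveResponseOrToken()) == null) {
            // The real authenticator returns to the selector here instead of spinning.
        }
        return frame;
    }

    public static void main(String[] args) throws IOException {
        System.out.println(new NonblockingReadDemo().pollUntilComplete().length); // 1
    }
}

On failure, the method throws IllegalSaslStateException, which extends AuthenticationException, so clients treat it as fatal and non-retriable.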
use of org.apache.kafka.common.protocol.types.SchemaException in project kafka by apache.
the class ConsumerProtocol method deserializeAssignment.
public static Assignment deserializeAssignment(final ByteBuffer buffer, short version) {
    version = checkAssignmentVersion(version);
    try {
        ConsumerProtocolAssignment data = new ConsumerProtocolAssignment(new ByteBufferAccessor(buffer), version);
        List<TopicPartition> assignedPartitions = new ArrayList<>();
        for (ConsumerProtocolAssignment.TopicPartition tp : data.assignedPartitions()) {
            for (Integer partition : tp.partitions()) {
                assignedPartitions.add(new TopicPartition(tp.topic(), partition));
            }
        }
        return new Assignment(assignedPartitions, data.userData() != null ? data.userData().duplicate() : null);
    } catch (BufferUnderflowException e) {
        throw new SchemaException("Buffer underflow while parsing consumer protocol's assignment", e);
    }
}
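A round trip through the matching serializer shows the contract: a well-formed buffer deserializes back to the same partitions, while a truncated buffer underflows mid-parse and is rethrown as SchemaException. A minimal sketch (ConsumerProtocol lives in the internal package org.apache.kafka.clients.consumer.internals, so this is for illustration rather than public-API use):

import java.nio.ByteBuffer;
import java.util.Collections;

import org.apache.kafka.clients.consumer.ConsumerPartitionAssignor.Assignment;
import org.apache.kafka.clients.consumer.internals.ConsumerProtocol;
import org.apache.kafka.common.TopicPartition;
import org.apache.kafka.common.protocol.types.SchemaException;

final class AssignmentRoundTrip {
    public static void main(String[] args) {
        // Round trip: serialize an assignment, then read it back.
        Assignment original = new Assignment(
                Collections.singletonList(new TopicPartition("topic-a", 0)));
        ByteBuffer buffer = ConsumerProtocol.serializeAssignment(original);
        System.out.println(ConsumerProtocol.deserializeAssignment(buffer).partitions()); // [topic-a-0]

        // An empty buffer underflows while parsing and surfaces as SchemaException, as above.
        try {
            ConsumerProtocol.deserializeAssignment(ByteBuffer.allocate(0), (short) 0);
        } catch (SchemaException e) {
            System.out.println("corrupt assignment: " + e.getMessage());
        }
    }
}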
use of org.apache.kafka.common.protocol.types.SchemaException in project kafka by apache.
the class SaslClientAuthenticator method receiveKafkaResponse.
private AbstractResponse receiveKafkaResponse() throws IOException {
    if (netInBuffer == null)
        netInBuffer = new NetworkReceive(node);
    NetworkReceive receive = netInBuffer;
    try {
        byte[] responseBytes = receiveResponseOrToken();
        if (responseBytes == null)
            return null;
        else {
            AbstractResponse response = NetworkClient.parseResponse(ByteBuffer.wrap(responseBytes), currentRequestHeader);
            currentRequestHeader = null;
            return response;
        }
    } catch (BufferUnderflowException | SchemaException | IllegalArgumentException e) {
        /*
         * Account for the fact that during re-authentication there may be responses
         * arriving for requests that were sent in the past.
         */
        if (reauthInfo.reauthenticating()) {
            /*
             * It didn't match the current request header, so it must be unrelated to
             * re-authentication. Save it so it can be processed later.
             */
            receive.payload().rewind();
            reauthInfo.pendingAuthenticatedReceives.add(receive);
            return null;
        }
        log.debug("Invalid SASL mechanism response, server may be expecting only GSSAPI tokens");
        setSaslState(SaslState.FAILED);
        throw new IllegalSaslStateException("Invalid SASL mechanism response, server may be expecting a different protocol", e);
    }
}
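Compared to the version above, this one also catches BufferUnderflowException and copes with re-authentication: a response to an old, pre-re-authentication request can arrive mid-handshake, so it is saved for later instead of failing the connection. The payload().rewind() call is what makes the save work: draining the receive left the buffer's position at its limit, and rewinding restores it so the bytes can be parsed again. A standalone java.nio illustration of that buffer state:

import java.nio.ByteBuffer;

final class RewindDemo {
    public static void main(String[] args) {
        ByteBuffer payload = ByteBuffer.wrap(new byte[] {1, 2, 3, 4});
        byte[] drained = new byte[payload.remaining()];
        payload.get(drained);                     // reading advances position to the limit
        System.out.println(payload.remaining());  // 0: a second parse would see no data
        payload.rewind();                         // reset position to 0, limit unchanged
        System.out.println(payload.remaining());  // 4: the saved receive can be re-parsed later
    }
}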
use of org.apache.kafka.common.protocol.types.SchemaException in project kafka by apache.
the class MirrorClient method remoteConsumerOffsets.
/**
 * Translate a remote consumer group's offsets into corresponding local offsets. Topics are automatically
 * renamed according to the ReplicationPolicy.
 * @param consumerGroupId group ID of remote consumer group
 * @param remoteClusterAlias alias of remote cluster
 * @param timeout timeout
 */
public Map<TopicPartition, OffsetAndMetadata> remoteConsumerOffsets(String consumerGroupId, String remoteClusterAlias, Duration timeout) {
    long deadline = System.currentTimeMillis() + timeout.toMillis();
    Map<TopicPartition, OffsetAndMetadata> offsets = new HashMap<>();
    try (KafkaConsumer<byte[], byte[]> consumer = new KafkaConsumer<>(consumerConfig, new ByteArrayDeserializer(), new ByteArrayDeserializer())) {
        // checkpoint topics are not "remote topics", as they are not replicated. So we don't need
        // to use ReplicationPolicy to create the checkpoint topic here.
        String checkpointTopic = replicationPolicy.checkpointsTopic(remoteClusterAlias);
        List<TopicPartition> checkpointAssignment = Collections.singletonList(new TopicPartition(checkpointTopic, 0));
        consumer.assign(checkpointAssignment);
        consumer.seekToBeginning(checkpointAssignment);
        while (System.currentTimeMillis() < deadline && !endOfStream(consumer, checkpointAssignment)) {
            ConsumerRecords<byte[], byte[]> records = consumer.poll(timeout);
            for (ConsumerRecord<byte[], byte[]> record : records) {
                try {
                    Checkpoint checkpoint = Checkpoint.deserializeRecord(record);
                    if (checkpoint.consumerGroupId().equals(consumerGroupId)) {
                        offsets.put(checkpoint.topicPartition(), checkpoint.offsetAndMetadata());
                    }
                } catch (SchemaException e) {
                    log.info("Could not deserialize record. Skipping.", e);
                }
            }
        }
        log.info("Consumed {} checkpoint records for {} from {}.", offsets.size(), consumerGroupId, checkpointTopic);
    }
    return offsets;
}
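Here SchemaException marks an individual checkpoint record as corrupt, so it is logged and skipped rather than aborting the whole translation. A hedged usage sketch, translating a group's offsets after a failover and seeding them on the local cluster; the bootstrap address, group ID, and cluster alias are placeholders, and it assumes a MirrorMaker 2 checkpoint connector has been emitting checkpoints:

import java.time.Duration;
import java.util.HashMap;
import java.util.Map;

import org.apache.kafka.clients.admin.Admin;
import org.apache.kafka.clients.consumer.OffsetAndMetadata;
import org.apache.kafka.common.TopicPartition;
import org.apache.kafka.connect.mirror.MirrorClient;

final class FailoverExample {
    public static void main(String[] args) throws Exception {
        // Config for the *local* (target) cluster; placeholder address.
        Map<String, Object> props = new HashMap<>();
        props.put("bootstrap.servers", "localhost:9092");

        MirrorClient client = new MirrorClient(props);
        try (Admin admin = Admin.create(props)) {
            // Offsets "my-group" had on the "primary" cluster, translated to local topics.
            Map<TopicPartition, OffsetAndMetadata> offsets =
                    client.remoteConsumerOffsets("my-group", "primary", Duration.ofMinutes(1));
            // Seed the group locally so a failed-over consumer resumes near where it left off.
            admin.alterConsumerGroupOffsets("my-group", offsets).all().get();
        } finally {
            client.close();
        }
    }
}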