Use of org.apache.kafka.common.protocol.Errors in project kafka by apache.
The class KafkaRaftClient, method handleVoteResponse.
private boolean handleVoteResponse(RaftResponse.Inbound responseMetadata, long currentTimeMs) {
    int remoteNodeId = responseMetadata.sourceId();
    VoteResponseData response = (VoteResponseData) responseMetadata.data;
    Errors topLevelError = Errors.forCode(response.errorCode());
    if (topLevelError != Errors.NONE) {
        return handleTopLevelError(topLevelError, responseMetadata);
    }
    if (!hasValidTopicPartition(response, log.topicPartition())) {
        return false;
    }
    VoteResponseData.PartitionData partitionResponse = response.topics().get(0).partitions().get(0);
    Errors error = Errors.forCode(partitionResponse.errorCode());
    OptionalInt responseLeaderId = optionalLeaderId(partitionResponse.leaderId());
    int responseEpoch = partitionResponse.leaderEpoch();
    Optional<Boolean> handled = maybeHandleCommonResponse(error, responseLeaderId, responseEpoch, currentTimeMs);
    if (handled.isPresent()) {
        return handled.get();
    } else if (error == Errors.NONE) {
        if (quorum.isLeader()) {
            logger.debug("Ignoring vote response {} since we already became leader for epoch {}", partitionResponse, quorum.epoch());
        } else if (quorum.isCandidate()) {
            CandidateState state = quorum.candidateStateOrThrow();
            if (partitionResponse.voteGranted()) {
                state.recordGrantedVote(remoteNodeId);
                maybeTransitionToLeader(state, currentTimeMs);
            } else {
                state.recordRejectedVote(remoteNodeId);
                // If the vote is rejected, back off immediately rather than waiting
                // for the election timeout, since the vote has become gridlocked.
                if (state.isVoteRejected() && !state.isBackingOff()) {
                    logger.info("Insufficient remaining votes to become leader (rejected by {}). We will back off before retrying the election", state.rejectingVoters());
                    state.startBackingOff(currentTimeMs, binaryExponentialElectionBackoffMs(state.retries()));
                }
            }
        } else {
            logger.debug("Ignoring vote response {} since we are no longer a candidate in epoch {}", partitionResponse, quorum.epoch());
        }
        return true;
    } else {
        return handleUnexpectedError(error, responseMetadata);
    }
}
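The backoff duration comes from binaryExponentialElectionBackoffMs, whose body is not shown above. As a rough illustration of the idea, here is a minimal, self-contained sketch of binary exponential backoff with random jitter; the BASE_BACKOFF_MS and MAX_BACKOFF_MS constants and the exact formula are assumptions for the example, and KafkaRaftClient's real implementation may differ.

import java.util.concurrent.ThreadLocalRandom;

public class ElectionBackoff {
    // Hypothetical constants; KafkaRaftClient derives its bounds from quorum config.
    private static final int BASE_BACKOFF_MS = 50;
    private static final int MAX_BACKOFF_MS = 1000;

    // Double the window on each retry and pick a random point inside it,
    // so candidates in a gridlocked election are unlikely to collide again.
    static int binaryExponentialBackoffMs(int retries) {
        int window = BASE_BACKOFF_MS * (1 << Math.min(retries, 10));
        int bounded = Math.min(window, MAX_BACKOFF_MS);
        return ThreadLocalRandom.current().nextInt(bounded + 1);
    }

    public static void main(String[] args) {
        for (int retry = 1; retry <= 5; retry++) {
            System.out.printf("retry %d -> backoff %d ms%n", retry, binaryExponentialBackoffMs(retry));
        }
    }
}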
Use of org.apache.kafka.common.protocol.Errors in project kafka by apache.
The class KafkaRaftClient, method handleBeginQuorumEpochRequest.
/**
 * Handle a BeginEpoch request. This API may return the following errors:
 *
 * - {@link Errors#INCONSISTENT_CLUSTER_ID} if a cluster id is provided in the request
 *   but does not match this node's cluster id
 * - {@link Errors#BROKER_NOT_AVAILABLE} if this node is currently shutting down
 * - {@link Errors#INCONSISTENT_VOTER_SET} if the request suggests inconsistent voter membership (e.g.
 *   if this node or the sender is not one of the current known voters)
 * - {@link Errors#FENCED_LEADER_EPOCH} if the epoch is smaller than this node's epoch
 */
private BeginQuorumEpochResponseData handleBeginQuorumEpochRequest(RaftRequest.Inbound requestMetadata, long currentTimeMs) {
    BeginQuorumEpochRequestData request = (BeginQuorumEpochRequestData) requestMetadata.data;
    if (!hasValidClusterId(request.clusterId())) {
        return new BeginQuorumEpochResponseData().setErrorCode(Errors.INCONSISTENT_CLUSTER_ID.code());
    }
    if (!hasValidTopicPartition(request, log.topicPartition())) {
        // Until we support multi-raft, we treat topic partition mismatches as invalid requests
        return new BeginQuorumEpochResponseData().setErrorCode(Errors.INVALID_REQUEST.code());
    }
    BeginQuorumEpochRequestData.PartitionData partitionRequest = request.topics().get(0).partitions().get(0);
    int requestLeaderId = partitionRequest.leaderId();
    int requestEpoch = partitionRequest.leaderEpoch();
    Optional<Errors> errorOpt = validateVoterOnlyRequest(requestLeaderId, requestEpoch);
    if (errorOpt.isPresent()) {
        return buildBeginQuorumEpochResponse(errorOpt.get());
    }
    maybeTransition(OptionalInt.of(requestLeaderId), requestEpoch, currentTimeMs);
    return buildBeginQuorumEpochResponse(Errors.NONE);
}
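validateVoterOnlyRequest is not shown above; it returns Optional.empty() when the request is acceptable and a specific Errors value otherwise, which the caller maps straight into the response. A self-contained sketch of that Optional<Errors> validation pattern, with hypothetical epoch and voter-set checks (the real checks in KafkaRaftClient are more involved):

import java.util.Optional;
import java.util.Set;
import org.apache.kafka.common.protocol.Errors;

public class VoterRequestValidator {
    private final int currentEpoch;
    private final Set<Integer> voters;

    VoterRequestValidator(int currentEpoch, Set<Integer> voters) {
        this.currentEpoch = currentEpoch;
        this.voters = voters;
    }

    // Empty means "valid"; otherwise the caller builds an error response directly.
    Optional<Errors> validateVoterOnlyRequest(int leaderId, int epoch) {
        if (epoch < currentEpoch) {
            return Optional.of(Errors.FENCED_LEADER_EPOCH);
        } else if (!voters.contains(leaderId)) {
            return Optional.of(Errors.INCONSISTENT_VOTER_SET);
        }
        return Optional.empty();
    }

    public static void main(String[] args) {
        VoterRequestValidator validator = new VoterRequestValidator(5, Set.of(1, 2, 3));
        System.out.println(validator.validateVoterOnlyRequest(1, 4)); // Optional[FENCED_LEADER_EPOCH]
        System.out.println(validator.validateVoterOnlyRequest(9, 5)); // Optional[INCONSISTENT_VOTER_SET]
        System.out.println(validator.validateVoterOnlyRequest(1, 5)); // Optional.empty
    }
}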
Use of org.apache.kafka.common.protocol.Errors in project kafka by apache.
The class KafkaRaftClient, method handleFetchSnapshotRequest.
/**
 * Handle a FetchSnapshot request, similar to the Fetch request but we use {@link UnalignedRecords}
 * in the response because the records are not necessarily offset-aligned.
 *
 * This API may return the following errors:
 *
 * - {@link Errors#INCONSISTENT_CLUSTER_ID} if a cluster id is provided in the request
 *   but does not match this node's cluster id
 * - {@link Errors#BROKER_NOT_AVAILABLE} if this node is currently shutting down
 * - {@link Errors#FENCED_LEADER_EPOCH} if the epoch is smaller than this node's epoch
 * - {@link Errors#INVALID_REQUEST} if the request epoch is larger than the leader's current epoch
 *   or if either the fetch offset or the last fetched epoch is invalid
 * - {@link Errors#SNAPSHOT_NOT_FOUND} if the requested snapshot id does not exist
 * - {@link Errors#POSITION_OUT_OF_RANGE} if the requested snapshot position is out of range
 */
private FetchSnapshotResponseData handleFetchSnapshotRequest(RaftRequest.Inbound requestMetadata) {
    FetchSnapshotRequestData data = (FetchSnapshotRequestData) requestMetadata.data;
    if (!hasValidClusterId(data.clusterId())) {
        return new FetchSnapshotResponseData().setErrorCode(Errors.INCONSISTENT_CLUSTER_ID.code());
    }
    // Reject anything other than exactly one topic with exactly one partition.
    // Note `||` rather than `&&`: with `&&`, a request with two topics of one
    // partition each would slip through the check.
    if (data.topics().size() != 1 || data.topics().get(0).partitions().size() != 1) {
        return FetchSnapshotResponse.withTopLevelError(Errors.INVALID_REQUEST);
    }
    Optional<FetchSnapshotRequestData.PartitionSnapshot> partitionSnapshotOpt = FetchSnapshotRequest.forTopicPartition(data, log.topicPartition());
    if (!partitionSnapshotOpt.isPresent()) {
        // The Raft client assumes that there is only one topic partition.
        TopicPartition unknownTopicPartition = new TopicPartition(data.topics().get(0).name(), data.topics().get(0).partitions().get(0).partition());
        return FetchSnapshotResponse.singleton(unknownTopicPartition, responsePartitionSnapshot -> responsePartitionSnapshot.setErrorCode(Errors.UNKNOWN_TOPIC_OR_PARTITION.code()));
    }
    FetchSnapshotRequestData.PartitionSnapshot partitionSnapshot = partitionSnapshotOpt.get();
    Optional<Errors> leaderValidation = validateLeaderOnlyRequest(partitionSnapshot.currentLeaderEpoch());
    if (leaderValidation.isPresent()) {
        return FetchSnapshotResponse.singleton(log.topicPartition(), responsePartitionSnapshot -> addQuorumLeader(responsePartitionSnapshot).setErrorCode(leaderValidation.get().code()));
    }
    OffsetAndEpoch snapshotId = new OffsetAndEpoch(partitionSnapshot.snapshotId().endOffset(), partitionSnapshot.snapshotId().epoch());
    Optional<RawSnapshotReader> snapshotOpt = log.readSnapshot(snapshotId);
    if (!snapshotOpt.isPresent()) {
        return FetchSnapshotResponse.singleton(log.topicPartition(), responsePartitionSnapshot -> addQuorumLeader(responsePartitionSnapshot).setErrorCode(Errors.SNAPSHOT_NOT_FOUND.code()));
    }
    RawSnapshotReader snapshot = snapshotOpt.get();
    long snapshotSize = snapshot.sizeInBytes();
    if (partitionSnapshot.position() < 0 || partitionSnapshot.position() >= snapshotSize) {
        return FetchSnapshotResponse.singleton(log.topicPartition(), responsePartitionSnapshot -> addQuorumLeader(responsePartitionSnapshot).setErrorCode(Errors.POSITION_OUT_OF_RANGE.code()));
    }
    if (partitionSnapshot.position() > Integer.MAX_VALUE) {
        throw new IllegalStateException(String.format("Trying to fetch a snapshot with size (%s) and a position (%s) larger than %s", snapshotSize, partitionSnapshot.position(), Integer.MAX_VALUE));
    }
    int maxSnapshotSize;
    try {
        maxSnapshotSize = Math.toIntExact(snapshotSize);
    } catch (ArithmeticException e) {
        maxSnapshotSize = Integer.MAX_VALUE;
    }
    UnalignedRecords records = snapshot.slice(partitionSnapshot.position(), Math.min(data.maxBytes(), maxSnapshotSize));
    return FetchSnapshotResponse.singleton(log.topicPartition(), responsePartitionSnapshot -> {
        addQuorumLeader(responsePartitionSnapshot)
            .snapshotId()
            .setEndOffset(snapshotId.offset)
            .setEpoch(snapshotId.epoch);
        return responsePartitionSnapshot
            .setSize(snapshotSize)
            .setPosition(partitionSnapshot.position())
            .setUnalignedRecords(records);
    });
}
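On the other side of this handler, a follower pages through a snapshot by issuing repeated FetchSnapshot requests, advancing its position by the number of bytes returned until it reaches the size reported in the response. A hedged sketch of that loop, with a hypothetical SnapshotChunk record and SnapshotTransport stub standing in for the real request plumbing (these are illustrative names, not Kafka API):

// Hypothetical types standing in for the FetchSnapshot request/response plumbing.
record SnapshotChunk(long size, long position, byte[] records) {}

interface SnapshotTransport {
    SnapshotChunk fetchChunk(long position, int maxBytes);
}

public class SnapshotPager {
    // Repeatedly fetch from the advancing position until the whole snapshot is read.
    static long downloadSnapshot(SnapshotTransport transport, int maxBytes) {
        long position = 0;
        long size;
        do {
            SnapshotChunk chunk = transport.fetchChunk(position, maxBytes);
            size = chunk.size();
            position += chunk.records().length;
        } while (position < size);
        return position;
    }

    public static void main(String[] args) {
        byte[] snapshot = new byte[2500];
        // Fake transport serving slices of an in-memory snapshot.
        SnapshotTransport fake = (position, maxBytes) -> {
            int len = (int) Math.min(maxBytes, snapshot.length - position);
            byte[] out = new byte[len];
            System.arraycopy(snapshot, (int) position, out, 0, len);
            return new SnapshotChunk(snapshot.length, position, out);
        };
        System.out.println("downloaded " + downloadSnapshot(fake, 1024) + " bytes");
    }
}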
Use of org.apache.kafka.common.protocol.Errors in project kafka by apache.
The class RequestResponseTest, method createLeaderAndIsrResponse.
private LeaderAndIsrResponse createLeaderAndIsrResponse() {
    Map<TopicPartition, Errors> responses = new HashMap<>();
    responses.put(new TopicPartition("test", 0), Errors.NONE);
    return new LeaderAndIsrResponse(Errors.NONE, responses);
}
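Both this test and the handlers above rely on Errors mapping between wire codes and exceptions. A small standalone example of that round trip, using only methods that exist on the Errors enum (code(), forCode(), exception()):

import org.apache.kafka.common.errors.RetriableException;
import org.apache.kafka.common.protocol.Errors;

public class ErrorsRoundTrip {
    public static void main(String[] args) {
        // Serialize to the wire code and back.
        short code = Errors.UNKNOWN_TOPIC_OR_PARTITION.code();
        Errors error = Errors.forCode(code);

        // Each Errors value carries a matching exception instance, which callers
        // inspect to decide between retrying and failing fast.
        boolean retriable = error.exception() instanceof RetriableException;
        System.out.println(error + " code=" + code + " retriable=" + retriable);
    }
}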
Use of org.apache.kafka.common.protocol.Errors in project kafka by apache.
The class Fetcher, method getTopicMetadata.
/**
 * Get metadata for all topics present in the Kafka cluster.
 *
 * @param request The MetadataRequest to send
 * @param timeout Maximum time in milliseconds to spend fetching the topic metadata
 * @return The map of topics with their partition information
 */
public Map<String, List<PartitionInfo>> getTopicMetadata(MetadataRequest.Builder request, long timeout) {
    // Save the round trip if no topics are requested.
    if (!request.isAllTopics() && request.topics().isEmpty())
        return Collections.emptyMap();
    long start = time.milliseconds();
    long remaining = timeout;
    do {
        RequestFuture<ClientResponse> future = sendMetadataRequest(request);
        client.poll(future, remaining);
        if (future.failed() && !future.isRetriable())
            throw future.exception();
        if (future.succeeded()) {
            MetadataResponse response = (MetadataResponse) future.value().responseBody();
            Cluster cluster = response.cluster();
            Set<String> unauthorizedTopics = cluster.unauthorizedTopics();
            if (!unauthorizedTopics.isEmpty())
                throw new TopicAuthorizationException(unauthorizedTopics);
            boolean shouldRetry = false;
            Map<String, Errors> errors = response.errors();
            if (!errors.isEmpty()) {
                // if there were errors, we need to check whether they were fatal or whether
                // we should just retry
                log.debug("Topic metadata fetch included errors: {}", errors);
                for (Map.Entry<String, Errors> errorEntry : errors.entrySet()) {
                    String topic = errorEntry.getKey();
                    Errors error = errorEntry.getValue();
                    if (error == Errors.INVALID_TOPIC_EXCEPTION)
                        throw new InvalidTopicException("Topic '" + topic + "' is invalid");
                    else if (error == Errors.UNKNOWN_TOPIC_OR_PARTITION)
                        // an unknown topic is simply skipped and left absent
                        // in the returned map
                        continue;
                    else if (error.exception() instanceof RetriableException)
                        shouldRetry = true;
                    else
                        throw new KafkaException("Unexpected error fetching metadata for topic " + topic, error.exception());
                }
            }
            if (!shouldRetry) {
                HashMap<String, List<PartitionInfo>> topicsPartitionInfos = new HashMap<>();
                for (String topic : cluster.topics())
                    topicsPartitionInfos.put(topic, cluster.availablePartitionsForTopic(topic));
                return topicsPartitionInfos;
            }
        }
        long elapsed = time.milliseconds() - start;
        remaining = timeout - elapsed;
        if (remaining > 0) {
            long backoff = Math.min(remaining, retryBackoffMs);
            time.sleep(backoff);
            remaining -= backoff;
        }
    } while (remaining > 0);
    throw new TimeoutException("Timeout expired while fetching topic metadata");
}