Example usage of org.apache.kafka.common.protocol.Errors in the Apache Kafka project: class ReplicationControlManagerTest, method buildElectLeadersResponse.
/**
 * Builds the expected {@link ElectLeadersResponseData} for a given top-level error and a map
 * of per-partition election errors.
 *
 * @param topLevelError      error code to set at the top level of the response
 * @param electAllPartitions when true, partitions whose error is ELECTION_NOT_NEEDED are
 *                           omitted from the per-topic results
 * @param errors             per-partition election outcome, keyed by topic partition
 * @return the assembled response with one ReplicaElectionResult per topic
 */
private ElectLeadersResponseData buildElectLeadersResponse(Errors topLevelError, boolean electAllPartitions, Map<TopicPartition, ApiError> errors) {
    // Bucket the per-partition errors by topic name; each bucket becomes one election result.
    Map<String, List<Map.Entry<TopicPartition, ApiError>>> errorsByTopic =
        errors.entrySet().stream().collect(Collectors.groupingBy(entry -> entry.getKey().topic()));
    ElectLeadersResponseData response = new ElectLeadersResponseData();
    response.setErrorCode(topLevelError.code());
    for (Map.Entry<String, List<Map.Entry<TopicPartition, ApiError>>> topicEntry : errorsByTopic.entrySet()) {
        ReplicaElectionResult electionResult = new ReplicaElectionResult().setTopic(topicEntry.getKey());
        // When electing all partitions, partitions that needed no election are filtered out.
        List<PartitionResult> partitionResults = topicEntry.getValue().stream()
            .filter(entry -> !(electAllPartitions && entry.getValue().error() == ELECTION_NOT_NEEDED))
            .map(entry -> new PartitionResult()
                .setPartitionId(entry.getKey().partition())
                .setErrorCode(entry.getValue().error().code())
                .setErrorMessage(entry.getValue().message()))
            .collect(Collectors.toList());
        electionResult.setPartitionResult(partitionResults);
        response.replicaElectionResults().add(electionResult);
    }
    return response;
}
Example usage of org.apache.kafka.common.protocol.Errors in the Apache Kafka project: class KafkaRaftClient, method handleBeginQuorumEpochResponse.
/**
 * Handles the response to a BeginQuorumEpoch request previously sent by this node.
 *
 * @param responseMetadata the inbound response envelope, including the source node id
 * @param currentTimeMs    current time in milliseconds
 * @return true if the response was handled, false if it should be ignored
 *         (e.g. it does not reference this client's topic partition)
 */
private boolean handleBeginQuorumEpochResponse(RaftResponse.Inbound responseMetadata, long currentTimeMs) {
    int acknowledgingNodeId = responseMetadata.sourceId();
    BeginQuorumEpochResponseData response = (BeginQuorumEpochResponseData) responseMetadata.data;

    // A non-NONE top-level error short-circuits all partition-level handling.
    Errors topLevelError = Errors.forCode(response.errorCode());
    if (topLevelError != Errors.NONE) {
        return handleTopLevelError(topLevelError, responseMetadata);
    }
    // Ignore responses that do not reference this client's single raft topic partition.
    if (!hasValidTopicPartition(response, log.topicPartition())) {
        return false;
    }

    BeginQuorumEpochResponseData.PartitionData partitionData = response.topics().get(0).partitions().get(0);
    Errors partitionError = Errors.forCode(partitionData.errorCode());
    OptionalInt leaderIdFromResponse = optionalLeaderId(partitionData.leaderId());

    // Shared epoch/leader bookkeeping; if it fully handled the response we are done.
    Optional<Boolean> commonResult = maybeHandleCommonResponse(
        partitionError, leaderIdFromResponse, partitionData.leaderEpoch(), currentTimeMs);
    if (commonResult.isPresent()) {
        return commonResult.get();
    }

    if (partitionError != Errors.NONE) {
        return handleUnexpectedError(partitionError, responseMetadata);
    }
    if (quorum.isLeader()) {
        // Record that this voter has acknowledged us as leader for the current epoch.
        LeaderState<T> leaderState = quorum.leaderStateOrThrow();
        leaderState.addAcknowledgementFrom(acknowledgingNodeId);
    } else {
        logger.debug("Ignoring BeginQuorumEpoch response {} since this node is not the leader anymore", response);
    }
    return true;
}
Example usage of org.apache.kafka.common.protocol.Errors in the Apache Kafka project: class KafkaRaftClient, method handleFetchResponse.
// Handles the response to a Fetch request sent by this replica while following a leader.
// Returns true when the response was handled, false when it should be ignored (malformed
// topic/partition or an inconsistent snapshot id).
private boolean handleFetchResponse(RaftResponse.Inbound responseMetadata, long currentTimeMs) {
FetchResponseData response = (FetchResponseData) responseMetadata.data;
// A non-NONE top-level error short-circuits all partition-level handling.
Errors topLevelError = Errors.forCode(response.errorCode());
if (topLevelError != Errors.NONE) {
return handleTopLevelError(topLevelError, responseMetadata);
}
// Ignore responses that do not reference this client's raft topic partition and topic id.
if (!RaftUtil.hasValidTopicPartition(response, log.topicPartition(), log.topicId())) {
return false;
}
// If the ID is valid, we can set the topic name.
response.responses().get(0).setTopic(log.topicPartition().topic());
FetchResponseData.PartitionData partitionResponse = response.responses().get(0).partitions().get(0);
FetchResponseData.LeaderIdAndEpoch currentLeaderIdAndEpoch = partitionResponse.currentLeader();
OptionalInt responseLeaderId = optionalLeaderId(currentLeaderIdAndEpoch.leaderId());
int responseEpoch = currentLeaderIdAndEpoch.leaderEpoch();
Errors error = Errors.forCode(partitionResponse.errorCode());
// Shared epoch/leader bookkeeping; if it fully handled the response we are done.
Optional<Boolean> handled = maybeHandleCommonResponse(error, responseLeaderId, responseEpoch, currentTimeMs);
if (handled.isPresent()) {
return handled.get();
}
// Past this point we must be an active follower of the current leader.
FollowerState state = quorum.followerStateOrThrow();
if (error == Errors.NONE) {
FetchResponseData.EpochEndOffset divergingEpoch = partitionResponse.divergingEpoch();
if (divergingEpoch.epoch() >= 0) {
// The leader is asking us to truncate before continuing
final OffsetAndEpoch divergingOffsetAndEpoch = new OffsetAndEpoch(divergingEpoch.endOffset(), divergingEpoch.epoch());
// Truncating below the high watermark would lose committed data; treat as fatal.
state.highWatermark().ifPresent(highWatermark -> {
if (divergingOffsetAndEpoch.offset < highWatermark.offset) {
throw new KafkaException("The leader requested truncation to offset " + divergingOffsetAndEpoch.offset + ", which is below the current high watermark" + " " + highWatermark);
}
});
long truncationOffset = log.truncateToEndOffset(divergingOffsetAndEpoch);
logger.info("Truncated to offset {} from Fetch response from leader {}", truncationOffset, quorum.leaderIdOrSentinel());
} else if (partitionResponse.snapshotId().epoch() >= 0 || partitionResponse.snapshotId().endOffset() >= 0) {
// The leader pointed us at a snapshot instead of log records. Both fields of the
// snapshot id must be valid together; a half-valid id is an inconsistent response.
if (partitionResponse.snapshotId().epoch() < 0) {
logger.error("The leader sent a snapshot id with a valid end offset {} but with an invalid epoch {}", partitionResponse.snapshotId().endOffset(), partitionResponse.snapshotId().epoch());
return false;
} else if (partitionResponse.snapshotId().endOffset() < 0) {
logger.error("The leader sent a snapshot id with a valid epoch {} but with an invalid end offset {}", partitionResponse.snapshotId().epoch(), partitionResponse.snapshotId().endOffset());
return false;
} else {
final OffsetAndEpoch snapshotId = new OffsetAndEpoch(partitionResponse.snapshotId().endOffset(), partitionResponse.snapshotId().epoch());
// Do not validate the snapshot id against the local replicated log
// since this snapshot is expected to reference offsets and epochs
// greater than the log end offset and high-watermark
state.setFetchingSnapshot(log.storeSnapshot(snapshotId));
}
} else {
// Normal path: append any fetched records and advance our high watermark.
Records records = FetchResponse.recordsOrFail(partitionResponse);
if (records.sizeInBytes() > 0) {
appendAsFollower(records);
}
// A negative high watermark in the response means the leader has not established one yet.
OptionalLong highWatermark = partitionResponse.highWatermark() < 0 ? OptionalLong.empty() : OptionalLong.of(partitionResponse.highWatermark());
updateFollowerHighWatermark(state, highWatermark);
}
// A successful fetch counts as contact with the leader, so reset the fetch timeout.
state.resetFetchTimeout(currentTimeMs);
return true;
} else {
return handleUnexpectedError(error, responseMetadata);
}
}
Example usage of org.apache.kafka.common.protocol.Errors in the Apache Kafka project: class KafkaRaftClient, method handleVoteRequest.
/**
* Handle a Vote request. This API may return the following errors:
*
* - {@link Errors#INCONSISTENT_CLUSTER_ID} if the cluster id is presented in request
* but different from this node
* - {@link Errors#BROKER_NOT_AVAILABLE} if this node is currently shutting down
* - {@link Errors#FENCED_LEADER_EPOCH} if the epoch is smaller than this node's epoch
* - {@link Errors#INCONSISTENT_VOTER_SET} if the request suggests inconsistent voter membership (e.g.
* if this node or the sender is not one of the current known voters)
* - {@link Errors#INVALID_REQUEST} if the last epoch or offset are invalid
*
* @param requestMetadata the inbound Vote request envelope
* @return a response indicating whether the vote was granted, or carrying one of the
*         errors listed above
*/
private VoteResponseData handleVoteRequest(RaftRequest.Inbound requestMetadata) {
VoteRequestData request = (VoteRequestData) requestMetadata.data;
if (!hasValidClusterId(request.clusterId())) {
return new VoteResponseData().setErrorCode(Errors.INCONSISTENT_CLUSTER_ID.code());
}
if (!hasValidTopicPartition(request, log.topicPartition())) {
// Until we support multi-raft, we treat individual topic partition mismatches as invalid requests
return new VoteResponseData().setErrorCode(Errors.INVALID_REQUEST.code());
}
VoteRequestData.PartitionData partitionRequest = request.topics().get(0).partitions().get(0);
int candidateId = partitionRequest.candidateId();
int candidateEpoch = partitionRequest.candidateEpoch();
// The candidate's log position: epoch and end offset of its last log entry.
int lastEpoch = partitionRequest.lastOffsetEpoch();
long lastEpochEndOffset = partitionRequest.lastOffset();
// A candidate's last entry must predate its candidate epoch, and neither value may be negative.
if (lastEpochEndOffset < 0 || lastEpoch < 0 || lastEpoch >= candidateEpoch) {
return buildVoteResponse(Errors.INVALID_REQUEST, false);
}
// Voter-membership / availability validation (e.g. INCONSISTENT_VOTER_SET,
// BROKER_NOT_AVAILABLE — presumably checked inside validateVoterOnlyRequest; confirm there).
Optional<Errors> errorOpt = validateVoterOnlyRequest(candidateId, candidateEpoch);
if (errorOpt.isPresent()) {
return buildVoteResponse(errorOpt.get(), false);
}
// Seeing a higher epoch forces us to abandon our current epoch state before deciding the vote.
if (candidateEpoch > quorum.epoch()) {
transitionToUnattached(candidateEpoch);
}
// Grant the vote only if the candidate's log is at least as up-to-date as ours.
OffsetAndEpoch lastEpochEndOffsetAndEpoch = new OffsetAndEpoch(lastEpochEndOffset, lastEpoch);
boolean voteGranted = quorum.canGrantVote(candidateId, lastEpochEndOffsetAndEpoch.compareTo(endOffset()) >= 0);
// Only record the vote (transition to voted) when we have not already committed to
// another leader or candidate in this epoch.
if (voteGranted && quorum.isUnattached()) {
transitionToVoted(candidateId, candidateEpoch);
}
logger.info("Vote request {} with epoch {} is {}", request, candidateEpoch, voteGranted ? "granted" : "rejected");
return buildVoteResponse(Errors.NONE, voteGranted);
}
Example usage of org.apache.kafka.common.protocol.Errors in the Apache Kafka project: class KafkaRaftClient, method tryCompleteFetchRequest.
/**
 * Attempts to complete a Fetch request as leader: validates the request's leader epoch,
 * validates the fetch position against the log, reads records when the position is valid,
 * and updates the fetching replica's state (possibly advancing the high watermark).
 *
 * @param replicaId     id of the fetching replica
 * @param request       the fetch partition data from the request
 * @param currentTimeMs current time in milliseconds
 * @return a fetch response; on any unexpected exception, an empty response with
 *         {@link Errors#UNKNOWN_SERVER_ERROR}
 */
private FetchResponseData tryCompleteFetchRequest(int replicaId, FetchRequestData.FetchPartition request, long currentTimeMs) {
    try {
        // Reject the fetch outright if we are not the leader for the request's epoch.
        Optional<Errors> validationError = validateLeaderOnlyRequest(request.currentLeaderEpoch());
        if (validationError.isPresent()) {
            return buildEmptyFetchResponse(validationError.get(), Optional.empty());
        }

        long fetchOffset = request.fetchOffset();
        int lastFetchedEpoch = request.lastFetchedEpoch();
        LeaderState<T> leaderState = quorum.leaderStateOrThrow();
        ValidOffsetAndEpoch validOffsetAndEpoch = log.validateOffsetAndEpoch(fetchOffset, lastFetchedEpoch);

        // Only read records for a valid position; an invalid one still gets a (diverging/
        // snapshot) response built from validOffsetAndEpoch, with no records attached.
        Records fetchedRecords = MemoryRecords.EMPTY;
        if (validOffsetAndEpoch.kind() == ValidOffsetAndEpoch.Kind.VALID) {
            LogFetchInfo info = log.read(fetchOffset, Isolation.UNCOMMITTED);
            // Recording the replica's fetch position may advance the high watermark.
            if (leaderState.updateReplicaState(replicaId, currentTimeMs, info.startOffsetMetadata)) {
                onUpdateLeaderHighWatermark(leaderState, currentTimeMs);
            }
            fetchedRecords = info.records;
        }
        return buildFetchResponse(Errors.NONE, fetchedRecords, validOffsetAndEpoch, leaderState.highWatermark());
    } catch (Exception e) {
        // Boundary handler: surface any unexpected failure to the fetcher as a server error.
        logger.error("Caught unexpected error in fetch completion of request {}", request, e);
        return buildEmptyFetchResponse(Errors.UNKNOWN_SERVER_ERROR, Optional.empty());
    }
}
Aggregations