Use of org.apache.kafka.common.message.ListOffsetsResponseData.ListOffsetsPartitionResponse in project kafka by apache.
The class MessageTest, method testListOffsetsResponseVersions.
@Test
public void testListOffsetsResponseVersions() throws Exception {
    ListOffsetsPartitionResponse partition = new ListOffsetsPartitionResponse()
        .setErrorCode(Errors.NONE.code())
        .setPartitionIndex(0)
        .setOldStyleOffsets(Collections.singletonList(321L));
    List<ListOffsetsTopicResponse> topics = Collections.singletonList(
        new ListOffsetsTopicResponse()
            .setName("topic")
            .setPartitions(Collections.singletonList(partition)));
    Supplier<ListOffsetsResponseData> response = () -> new ListOffsetsResponseData().setTopics(topics);
    for (short version : ApiKeys.LIST_OFFSETS.allVersions()) {
        ListOffsetsResponseData responseData = response.get();
        if (version > 0) {
            responseData.topics().get(0).partitions().get(0)
                .setOldStyleOffsets(Collections.emptyList())
                .setOffset(456L)
                .setTimestamp(123L);
        }
        if (version > 1) {
            responseData.setThrottleTimeMs(1000);
        }
        if (version > 3) {
            partition.setLeaderEpoch(1);
        }
        testEquivalentMessageRoundTrip(version, responseData);
    }
}
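The helper testEquivalentMessageRoundTrip used above is defined elsewhere in MessageTest and is not part of this excerpt. As a rough illustration of what a version round trip over the generated message classes involves, the following sketch serializes the data at a given version and reads it back; the method name roundTrip and the use of ObjectSerializationCache and ByteBufferAccessor here are assumptions for illustration, not a copy of the real helper.

// Sketch only, not the MessageTest helper.
// Requires: java.nio.ByteBuffer, org.apache.kafka.common.protocol.ObjectSerializationCache,
// org.apache.kafka.common.protocol.ByteBufferAccessor.
private static ListOffsetsResponseData roundTrip(ListOffsetsResponseData message, short version) {
    // Serialize the message at the requested version...
    ObjectSerializationCache cache = new ObjectSerializationCache();
    ByteBuffer buffer = ByteBuffer.allocate(message.size(cache, version));
    message.write(new ByteBufferAccessor(buffer), cache, version);
    buffer.flip();
    // ...and read it back into a fresh instance.
    ListOffsetsResponseData copy = new ListOffsetsResponseData();
    copy.read(new ByteBufferAccessor(buffer), version);
    return copy;
}

Comparing the copy against the original, after accounting for fields that do not exist at the requested version, is roughly what the equivalence check amounts to.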
Use of org.apache.kafka.common.message.ListOffsetsResponseData.ListOffsetsPartitionResponse in project kafka by apache.
The class Fetcher, method handleListOffsetResponse.
/**
* Callback for the response of the list offset call above.
* @param listOffsetsResponse The response from the server.
* @param future The future to be completed when the response returns. Note that any partition-level errors will
* generally fail the entire future result. The one exception is UNSUPPORTED_FOR_MESSAGE_FORMAT,
* which indicates that the broker does not support the v1 message format. Partitions with this
* particular error are simply left out of the future map. Note that the corresponding timestamp
* value of each partition may be null only for v0. In v1 and later the ListOffset API would not
* return a null timestamp (-1 is returned instead when necessary).
*/
private void handleListOffsetResponse(ListOffsetsResponse listOffsetsResponse,
                                       RequestFuture<ListOffsetResult> future) {
    Map<TopicPartition, ListOffsetData> fetchedOffsets = new HashMap<>();
    Set<TopicPartition> partitionsToRetry = new HashSet<>();
    Set<String> unauthorizedTopics = new HashSet<>();
    for (ListOffsetsTopicResponse topic : listOffsetsResponse.topics()) {
        for (ListOffsetsPartitionResponse partition : topic.partitions()) {
            TopicPartition topicPartition = new TopicPartition(topic.name(), partition.partitionIndex());
            Errors error = Errors.forCode(partition.errorCode());
            switch (error) {
                case NONE:
                    if (!partition.oldStyleOffsets().isEmpty()) {
                        // Handle v0 response with offsets
                        long offset;
                        if (partition.oldStyleOffsets().size() > 1) {
                            future.raise(new IllegalStateException("Unexpected partitionData response of length " +
                                partition.oldStyleOffsets().size()));
                            return;
                        } else {
                            offset = partition.oldStyleOffsets().get(0);
                        }
                        log.debug("Handling v0 ListOffsetResponse response for {}. Fetched offset {}",
                            topicPartition, offset);
                        if (offset != ListOffsetsResponse.UNKNOWN_OFFSET) {
                            ListOffsetData offsetData = new ListOffsetData(offset, null, Optional.empty());
                            fetchedOffsets.put(topicPartition, offsetData);
                        }
                    } else {
                        // Handle v1 and later response or v0 without offsets
                        log.debug("Handling ListOffsetResponse response for {}. Fetched offset {}, timestamp {}",
                            topicPartition, partition.offset(), partition.timestamp());
                        if (partition.offset() != ListOffsetsResponse.UNKNOWN_OFFSET) {
                            Optional<Integer> leaderEpoch = (partition.leaderEpoch() == ListOffsetsResponse.UNKNOWN_EPOCH)
                                ? Optional.empty()
                                : Optional.of(partition.leaderEpoch());
                            ListOffsetData offsetData = new ListOffsetData(partition.offset(), partition.timestamp(), leaderEpoch);
                            fetchedOffsets.put(topicPartition, offsetData);
                        }
                    }
                    break;
                case UNSUPPORTED_FOR_MESSAGE_FORMAT:
                    // The message format on the broker side is before 0.10.0, which means it does not
                    // support timestamps. We treat this case the same as if we weren't able to find an
                    // offset corresponding to the requested timestamp and leave it out of the result.
                    log.debug("Cannot search by timestamp for partition {} because the message format version " +
                        "is before 0.10.0", topicPartition);
                    break;
                case NOT_LEADER_OR_FOLLOWER:
                case REPLICA_NOT_AVAILABLE:
                case KAFKA_STORAGE_ERROR:
                case OFFSET_NOT_AVAILABLE:
                case LEADER_NOT_AVAILABLE:
                case FENCED_LEADER_EPOCH:
                case UNKNOWN_LEADER_EPOCH:
                    log.debug("Attempt to fetch offsets for partition {} failed due to {}, retrying.",
                        topicPartition, error);
                    partitionsToRetry.add(topicPartition);
                    break;
                case UNKNOWN_TOPIC_OR_PARTITION:
                    log.warn("Received unknown topic or partition error in ListOffset request for partition {}",
                        topicPartition);
                    partitionsToRetry.add(topicPartition);
                    break;
                case TOPIC_AUTHORIZATION_FAILED:
                    unauthorizedTopics.add(topicPartition.topic());
                    break;
                default:
                    log.warn("Attempt to fetch offsets for partition {} failed due to unexpected exception: {}, retrying.",
                        topicPartition, error.message());
                    partitionsToRetry.add(topicPartition);
            }
        }
    }
    if (!unauthorizedTopics.isEmpty())
        future.raise(new TopicAuthorizationException(unauthorizedTopics));
    else
        future.complete(new ListOffsetResult(fetchedOffsets, partitionsToRetry));
}
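For context on the Javadoc caveat above: whatever consumes the completed ListOffsetResult has to tolerate a null timestamp, which can only come from a v0 response. A minimal sketch of that conversion, assuming ListOffsetData exposes offset, timestamp and leaderEpoch fields matching the constructor calls above (the method name and its placement are hypothetical):

// Sketch only: convert fetched per-partition data into OffsetAndTimestamp entries,
// skipping v0 results whose timestamp is null. The field names mirror the usage above;
// this is not a method from Fetcher itself.
private Map<TopicPartition, OffsetAndTimestamp> toOffsetsByTimes(Map<TopicPartition, ListOffsetData> fetched) {
    Map<TopicPartition, OffsetAndTimestamp> result = new HashMap<>(fetched.size());
    for (Map.Entry<TopicPartition, ListOffsetData> entry : fetched.entrySet()) {
        ListOffsetData data = entry.getValue();
        // A null timestamp can only come from a v0 ListOffsets response.
        if (data.timestamp != null)
            result.put(entry.getKey(), new OffsetAndTimestamp(data.offset, data.timestamp, data.leaderEpoch));
    }
    return result;
}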
Use of org.apache.kafka.common.message.ListOffsetsResponseData.ListOffsetsPartitionResponse in project kafka by apache.
The class FetcherTest, method testGetOffsetByTimeWithPartitionsRetryCouldTriggerMetadataUpdate.
@Test
public void testGetOffsetByTimeWithPartitionsRetryCouldTriggerMetadataUpdate() {
    List<Errors> retriableErrors = Arrays.asList(Errors.NOT_LEADER_OR_FOLLOWER, Errors.REPLICA_NOT_AVAILABLE,
        Errors.KAFKA_STORAGE_ERROR, Errors.OFFSET_NOT_AVAILABLE, Errors.LEADER_NOT_AVAILABLE,
        Errors.FENCED_LEADER_EPOCH, Errors.UNKNOWN_LEADER_EPOCH);
    final int newLeaderEpoch = 3;
    MetadataResponse updatedMetadata = RequestTestUtils.metadataUpdateWithIds("dummy", 3,
        singletonMap(topicName, Errors.NONE), singletonMap(topicName, 4), tp -> newLeaderEpoch, topicIds);
    Node originalLeader = initialUpdateResponse.buildCluster().leaderFor(tp1);
    Node newLeader = updatedMetadata.buildCluster().leaderFor(tp1);
    assertNotEquals(originalLeader, newLeader);
    for (Errors retriableError : retriableErrors) {
        buildFetcher();
        subscriptions.assignFromUser(mkSet(tp0, tp1));
        client.updateMetadata(initialUpdateResponse);
        final long fetchTimestamp = 10L;
        ListOffsetsPartitionResponse tp0NoError = new ListOffsetsPartitionResponse()
            .setPartitionIndex(tp0.partition())
            .setErrorCode(Errors.NONE.code())
            .setTimestamp(fetchTimestamp)
            .setOffset(4L);
        List<ListOffsetsTopicResponse> topics = Collections.singletonList(
            new ListOffsetsTopicResponse()
                .setName(tp0.topic())
                .setPartitions(Arrays.asList(tp0NoError,
                    new ListOffsetsPartitionResponse()
                        .setPartitionIndex(tp1.partition())
                        .setErrorCode(retriableError.code())
                        .setTimestamp(ListOffsetsRequest.LATEST_TIMESTAMP)
                        .setOffset(-1L))));
        ListOffsetsResponseData data = new ListOffsetsResponseData().setThrottleTimeMs(0).setTopics(topics);
        client.prepareResponseFrom(body -> {
            boolean isListOffsetRequest = body instanceof ListOffsetsRequest;
            if (isListOffsetRequest) {
                ListOffsetsRequest request = (ListOffsetsRequest) body;
                List<ListOffsetsTopic> expectedTopics = Collections.singletonList(
                    new ListOffsetsTopic()
                        .setName(tp0.topic())
                        .setPartitions(Arrays.asList(
                            new ListOffsetsPartition()
                                .setPartitionIndex(tp1.partition())
                                .setTimestamp(fetchTimestamp)
                                .setCurrentLeaderEpoch(ListOffsetsResponse.UNKNOWN_EPOCH),
                            new ListOffsetsPartition()
                                .setPartitionIndex(tp0.partition())
                                .setTimestamp(fetchTimestamp)
                                .setCurrentLeaderEpoch(ListOffsetsResponse.UNKNOWN_EPOCH))));
                return request.topics().equals(expectedTopics);
            } else {
                return false;
            }
        }, new ListOffsetsResponse(data), originalLeader);
        client.prepareMetadataUpdate(updatedMetadata);
        // If the metadata were not updated before retrying, the fetcher would consult the original leader
        // again and hit a NOT_LEADER error. We verify this at the end by counting how many prepared
        // responses are still awaiting an answer.
        List<ListOffsetsTopicResponse> topicsWithFatalError = Collections.singletonList(
            new ListOffsetsTopicResponse()
                .setName(tp0.topic())
                .setPartitions(Arrays.asList(tp0NoError,
                    new ListOffsetsPartitionResponse()
                        .setPartitionIndex(tp1.partition())
                        .setErrorCode(Errors.NOT_LEADER_OR_FOLLOWER.code())
                        .setTimestamp(ListOffsetsRequest.LATEST_TIMESTAMP)
                        .setOffset(-1L))));
        ListOffsetsResponseData dataWithFatalError = new ListOffsetsResponseData()
            .setThrottleTimeMs(0)
            .setTopics(topicsWithFatalError);
        client.prepareResponseFrom(new ListOffsetsResponse(dataWithFatalError), originalLeader);
        // The retried request to the new leader must contain only the one partition (tp1) that hit an error.
        client.prepareResponseFrom(body -> {
            boolean isListOffsetRequest = body instanceof ListOffsetsRequest;
            if (isListOffsetRequest) {
                ListOffsetsRequest request = (ListOffsetsRequest) body;
                ListOffsetsTopic requestTopic = request.topics().get(0);
                ListOffsetsPartition expectedPartition = new ListOffsetsPartition()
                    .setPartitionIndex(tp1.partition())
                    .setTimestamp(fetchTimestamp)
                    .setCurrentLeaderEpoch(newLeaderEpoch);
                return expectedPartition.equals(requestTopic.partitions().get(0));
            } else {
                return false;
            }
        }, listOffsetResponse(tp1, Errors.NONE, fetchTimestamp, 5L), newLeader);
        Map<TopicPartition, OffsetAndTimestamp> offsetAndTimestampMap = fetcher.offsetsForTimes(
            Utils.mkMap(Utils.mkEntry(tp0, fetchTimestamp), Utils.mkEntry(tp1, fetchTimestamp)),
            time.timer(Integer.MAX_VALUE));
        assertEquals(Utils.mkMap(
            Utils.mkEntry(tp0, new OffsetAndTimestamp(4L, fetchTimestamp)),
            Utils.mkEntry(tp1, new OffsetAndTimestamp(5L, fetchTimestamp))), offsetAndTimestampMap);
        // The prepared response carrying the NOT_LEADER error should never be consumed: the metadata was
        // refreshed before the first retry, so the original leader is never asked again.
        assertEquals(1, client.numAwaitingResponses());
        fetcher.close();
    }
}
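The test also calls the FetcherTest helper listOffsetResponse(tp1, Errors.NONE, fetchTimestamp, 5L), which is not included in this excerpt. A plausible sketch of such a helper, assuming it simply wraps a single-topic, single-partition ListOffsetsResponse using the same builders as above (the real helper may differ, for example by also setting a leader epoch):

// Sketch only: build a ListOffsetsResponse containing a single partition entry.
// Mirrors the builder calls used in the test above; not copied from FetcherTest.
private ListOffsetsResponse listOffsetResponse(TopicPartition tp, Errors error, long timestamp, long offset) {
    ListOffsetsPartitionResponse partition = new ListOffsetsPartitionResponse()
        .setPartitionIndex(tp.partition())
        .setErrorCode(error.code())
        .setTimestamp(timestamp)
        .setOffset(offset);
    ListOffsetsTopicResponse topic = new ListOffsetsTopicResponse()
        .setName(tp.topic())
        .setPartitions(Collections.singletonList(partition));
    ListOffsetsResponseData data = new ListOffsetsResponseData()
        .setThrottleTimeMs(0)
        .setTopics(Collections.singletonList(topic));
    return new ListOffsetsResponse(data);
}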