use of org.apache.kafka.common.TopicIdPartition in project kafka by apache.
the class KafkaConsumerTest method testFetchProgressWithMissingPartitionPosition.
@Test
public void testFetchProgressWithMissingPartitionPosition() {
    // Verifies that we can make progress on one partition while we are awaiting
    // a reset on another partition.
    ConsumerMetadata metadata = createMetadata(subscription);
    MockClient client = new MockClient(time, metadata);
    initMetadata(client, Collections.singletonMap(topic, 2));
    Node node = metadata.fetch().nodes().get(0);
    KafkaConsumer<String, String> consumer = newConsumerNoAutoCommit(time, client, subscription, metadata);
    consumer.assign(Arrays.asList(tp0, tp1));
    consumer.seekToEnd(singleton(tp0));
    consumer.seekToBeginning(singleton(tp1));
    client.prepareResponseFrom(FindCoordinatorResponse.prepareResponse(Errors.NONE, groupId, node), node);
    client.prepareResponse(body -> {
        ListOffsetsRequest request = (ListOffsetsRequest) body;
        List<ListOffsetsPartition> partitions = request.topics().stream().flatMap(t -> {
            if (t.name().equals(topic))
                return Stream.of(t.partitions());
            else
                return Stream.empty();
        }).flatMap(List::stream).collect(Collectors.toList());
        ListOffsetsPartition expectedTp0 = new ListOffsetsPartition()
            .setPartitionIndex(tp0.partition())
            .setTimestamp(ListOffsetsRequest.LATEST_TIMESTAMP);
        ListOffsetsPartition expectedTp1 = new ListOffsetsPartition()
            .setPartitionIndex(tp1.partition())
            .setTimestamp(ListOffsetsRequest.EARLIEST_TIMESTAMP);
        return partitions.contains(expectedTp0) && partitions.contains(expectedTp1);
    }, listOffsetsResponse(Collections.singletonMap(tp0, 50L), Collections.singletonMap(tp1, Errors.NOT_LEADER_OR_FOLLOWER)));
    client.prepareResponse(body -> {
        FetchRequest request = (FetchRequest) body;
        Map<TopicIdPartition, FetchRequest.PartitionData> fetchData = request.fetchData(topicNames);
        TopicIdPartition tidp0 = new TopicIdPartition(topicIds.get(tp0.topic()), tp0);
        return fetchData.keySet().equals(singleton(tidp0)) && fetchData.get(tidp0).fetchOffset == 50L;
    }, fetchResponse(tp0, 50L, 5));
    ConsumerRecords<String, String> records = consumer.poll(Duration.ofMillis(1));
    assertEquals(5, records.count());
    assertEquals(singleton(tp0), records.partitions());
}
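The request matcher above compares TopicIdPartition keys directly, so its equality semantics are what make the assertion meaningful. A minimal standalone sketch of those semantics (the topic name and IDs below are illustrative, not taken from the test):

import org.apache.kafka.common.TopicIdPartition;
import org.apache.kafka.common.TopicPartition;
import org.apache.kafka.common.Uuid;

public class TopicIdPartitionEquality {
    public static void main(String[] args) {
        Uuid topicId = Uuid.randomUuid();
        // Same topic ID and same topic-partition: equal, so it matches as a map key.
        TopicIdPartition a = new TopicIdPartition(topicId, new TopicPartition("test-topic", 0));
        TopicIdPartition b = new TopicIdPartition(topicId, new TopicPartition("test-topic", 0));
        System.out.println(a.equals(b)); // true
        // Same name and partition but a different ID: not equal. This is what lets
        // a deleted-and-recreated topic be distinguished from its predecessor.
        TopicIdPartition c = new TopicIdPartition(Uuid.randomUuid(), new TopicPartition("test-topic", 0));
        System.out.println(a.equals(c)); // false
    }
}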
use of org.apache.kafka.common.TopicIdPartition in project kafka by apache.
the class FetcherTest method testSeekBeforeException.
@Test
public void testSeekBeforeException() {
    buildFetcher(OffsetResetStrategy.NONE, new ByteArrayDeserializer(), new ByteArrayDeserializer(), 2, IsolationLevel.READ_UNCOMMITTED);
    assignFromUser(mkSet(tp0));
    subscriptions.seek(tp0, 1);
    assertEquals(1, fetcher.sendFetches());
    client.prepareResponse(fullFetchResponse(tidp0, this.records, Errors.NONE, 100L, 0));
    consumerClient.poll(time.timer(0));
    assertEquals(2, fetchedRecords().get(tp0).size());
    subscriptions.assignFromUser(mkSet(tp0, tp1));
    subscriptions.seekUnvalidated(tp1, new SubscriptionState.FetchPosition(1, Optional.empty(), metadata.currentLeader(tp1)));
    assertEquals(1, fetcher.sendFetches());
    Map<TopicIdPartition, FetchResponseData.PartitionData> partitions = new HashMap<>();
    partitions.put(tidp1, new FetchResponseData.PartitionData()
        .setPartitionIndex(tp1.partition())
        .setErrorCode(Errors.OFFSET_OUT_OF_RANGE.code())
        .setHighWatermark(100));
    client.prepareResponse(FetchResponse.of(Errors.NONE, 0, INVALID_SESSION_ID, new LinkedHashMap<>(partitions)));
    consumerClient.poll(time.timer(0));
    assertEquals(1, fetchedRecords().get(tp0).size());
    subscriptions.seek(tp1, 10);
    // Should not throw OffsetOutOfRangeException after the seek
    assertEmptyFetch("Should not return records or advance position after seeking to end of topic partitions");
}
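The fetcher here is built with OffsetResetStrategy.NONE, so an out-of-range offset is surfaced to the application rather than reset automatically; the test shows that seeking before the exception propagates clears it. A hedged sketch of the corresponding application-side pattern (the consumer setup is assumed, not shown):

import java.time.Duration;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.clients.consumer.OffsetOutOfRangeException;

static void pollOnce(KafkaConsumer<String, String> consumer) {
    try {
        ConsumerRecords<String, String> records = consumer.poll(Duration.ofMillis(100));
        records.forEach(r -> System.out.println(r.offset() + ": " + r.value()));
    } catch (OffsetOutOfRangeException e) {
        // With auto.offset.reset=none the application picks the recovery position
        // explicitly, e.g. the start of each out-of-range partition.
        consumer.seekToBeginning(e.partitions());
    }
}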
use of org.apache.kafka.common.TopicIdPartition in project kafka by apache.
the class FetchRequest method fetchData.
// For versions < 13, builds the partitionData map using only the FetchRequestData.
// For versions 13+, builds the partitionData map using both the FetchRequestData and a mapping of topic IDs to names.
public Map<TopicIdPartition, PartitionData> fetchData(Map<Uuid, String> topicNames) {
    if (fetchData == null) {
        synchronized (this) {
            if (fetchData == null) {
                fetchData = new LinkedHashMap<>();
                short version = version();
                data.topics().forEach(fetchTopic -> {
                    String name;
                    if (version < 13) {
                        // Versions < 13 carry the topic name on the wire, so it cannot be null.
                        name = fetchTopic.topic();
                    } else {
                        // Versions 13+ carry only the topic ID; the resolved name may be null
                        // if the ID is not present in the given mapping.
                        name = topicNames.get(fetchTopic.topicId());
                    }
                    fetchTopic.partitions().forEach(fetchPartition ->
                        fetchData.put(
                            new TopicIdPartition(fetchTopic.topicId(), new TopicPartition(name, fetchPartition.partition())),
                            new PartitionData(
                                fetchTopic.topicId(),
                                fetchPartition.fetchOffset(),
                                fetchPartition.logStartOffset(),
                                fetchPartition.partitionMaxBytes(),
                                optionalEpoch(fetchPartition.currentLeaderEpoch()),
                                optionalEpoch(fetchPartition.lastFetchedEpoch()))));
                });
            }
        }
    }
    return fetchData;
}
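fetchData is memoized with double-checked locking. The textbook form of that idiom declares the cached field volatile and publishes the map only once it is fully built, so no reader can observe a half-filled view. A standalone sketch of the pattern under those assumptions (names are illustrative, not Kafka's field declarations):

import java.util.LinkedHashMap;
import java.util.Map;

public class LazyView {
    // volatile so the fully built map is safely published to other threads.
    private volatile Map<String, Integer> view;

    public Map<String, Integer> view() {
        Map<String, Integer> result = view;
        if (result == null) {
            synchronized (this) {
                result = view;
                if (result == null) {
                    // Build into a local map and assign the field once complete,
                    // so no caller ever sees a partially populated map.
                    Map<String, Integer> built = new LinkedHashMap<>();
                    built.put("example", 42);
                    view = result = built;
                }
            }
        }
        return result;
    }
}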
use of org.apache.kafka.common.TopicIdPartition in project kafka by apache.
the class ProducerManager method publishMessage.
/**
 * Returns a {@link CompletableFuture} which will complete only after publishing of the given {@code remoteLogMetadata}
 * is considered complete.
 *
 * @param remoteLogMetadata RemoteLogMetadata to be published
 * @return a future that completes with the {@link RecordMetadata} of the published record, or completes
 *         exceptionally if publishing fails
 */
public CompletableFuture<RecordMetadata> publishMessage(RemoteLogMetadata remoteLogMetadata) {
    CompletableFuture<RecordMetadata> future = new CompletableFuture<>();
    TopicIdPartition topicIdPartition = remoteLogMetadata.topicIdPartition();
    int metadataPartitionNum = topicPartitioner.metadataPartition(topicIdPartition);
    log.debug("Publishing metadata message of partition:[{}] into metadata topic partition:[{}] with payload: [{}]",
        topicIdPartition, metadataPartitionNum, remoteLogMetadata);
    if (metadataPartitionNum >= rlmmConfig.metadataTopicPartitionsCount()) {
        // This should never occur as long as metadata partitions always remain the same.
        throw new KafkaException("Chosen partition no " + metadataPartitionNum + " must be less than the partition count: " + rlmmConfig.metadataTopicPartitionsCount());
    }
    try {
        Callback callback = new Callback() {
            @Override
            public void onCompletion(RecordMetadata metadata, Exception exception) {
                if (exception != null) {
                    future.completeExceptionally(exception);
                } else {
                    future.complete(metadata);
                }
            }
        };
        producer.send(new ProducerRecord<>(rlmmConfig.remoteLogMetadataTopicName(), metadataPartitionNum, null, serde.serialize(remoteLogMetadata)), callback);
    } catch (Exception ex) {
        future.completeExceptionally(ex);
    }
    return future;
}
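The core of publishMessage is bridging the producer's asynchronous Callback onto a CompletableFuture. The same adapter works for any send; a minimal sketch (topic, key, and value types are illustrative):

import java.util.concurrent.CompletableFuture;
import org.apache.kafka.clients.producer.Producer;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.apache.kafka.clients.producer.RecordMetadata;

static CompletableFuture<RecordMetadata> sendAsync(Producer<byte[], byte[]> producer,
                                                   ProducerRecord<byte[], byte[]> record) {
    CompletableFuture<RecordMetadata> future = new CompletableFuture<>();
    try {
        // Callback is a functional interface, so a lambda replaces the anonymous class.
        producer.send(record, (metadata, exception) -> {
            if (exception != null) {
                future.completeExceptionally(exception);
            } else {
                future.complete(metadata);
            }
        });
    } catch (Exception e) {
        // send() itself can throw (e.g. a serialization failure); surface that too.
        future.completeExceptionally(e);
    }
    return future;
}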
use of org.apache.kafka.common.TopicIdPartition in project kafka by apache.
the class RemoteLogSegmentMetadataUpdateTransform method fromApiMessageAndVersion.
public RemoteLogSegmentMetadataUpdate fromApiMessageAndVersion(ApiMessageAndVersion apiMessageAndVersion) {
    RemoteLogSegmentMetadataUpdateRecord record = (RemoteLogSegmentMetadataUpdateRecord) apiMessageAndVersion.message();
    RemoteLogSegmentMetadataUpdateRecord.RemoteLogSegmentIdEntry entry = record.remoteLogSegmentId();
    TopicIdPartition topicIdPartition = new TopicIdPartition(
        entry.topicIdPartition().id(),
        new TopicPartition(entry.topicIdPartition().name(), entry.topicIdPartition().partition()));
    return new RemoteLogSegmentMetadataUpdate(
        new RemoteLogSegmentId(topicIdPartition, entry.id()),
        record.eventTimestampMs(),
        RemoteLogSegmentState.forId(record.remoteLogSegmentState()),
        record.brokerId());
}
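The return statement relies on RemoteLogSegmentState.forId to map a serialized id back to an enum constant. A generic sketch of that id-to-enum lookup pattern (the enum below is illustrative, not Kafka's actual class):

import java.util.Arrays;
import java.util.Map;
import java.util.function.Function;
import java.util.stream.Collectors;

enum SegmentState {
    COPY_SEGMENT_STARTED((byte) 0),
    COPY_SEGMENT_FINISHED((byte) 1),
    DELETE_SEGMENT_STARTED((byte) 2),
    DELETE_SEGMENT_FINISHED((byte) 3);

    // Precomputed id-to-constant index, so forId is an O(1) map lookup.
    private static final Map<Byte, SegmentState> BY_ID =
        Arrays.stream(values()).collect(Collectors.toMap(SegmentState::id, Function.identity()));

    private final byte id;

    SegmentState(byte id) {
        this.id = id;
    }

    public byte id() {
        return id;
    }

    public static SegmentState forId(byte id) {
        SegmentState state = BY_ID.get(id);
        if (state == null)
            throw new IllegalArgumentException("Unknown segment state id: " + id);
        return state;
    }
}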