Use of org.apache.kafka.common.TopicPartition in project kafka by apache.
The class CollectionUtils, method groupDataByTopic.
/**
 * Group partitions by topic.
 * @param partitions the partitions to group
 * @return a map from each topic name to the list of partition ids in that topic
 */
public static Map<String, List<Integer>> groupDataByTopic(List<TopicPartition> partitions) {
    Map<String, List<Integer>> partitionsByTopic = new HashMap<>();
    for (TopicPartition tp : partitions) {
        String topic = tp.topic();
        List<Integer> topicData = partitionsByTopic.get(topic);
        if (topicData == null) {
            topicData = new ArrayList<>();
            partitionsByTopic.put(topic, topicData);
        }
        topicData.add(tp.partition());
    }
    return partitionsByTopic;
}
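On Java 8 and later the same grouping can be written more compactly with a stream collector. A minimal sketch (the wrapper class and sample data are illustrative, not from the Kafka source):

import java.util.Arrays;
import java.util.List;
import java.util.Map;
import java.util.stream.Collectors;
import org.apache.kafka.common.TopicPartition;

public class GroupByTopicExample {
    public static void main(String[] args) {
        List<TopicPartition> partitions = Arrays.asList(
                new TopicPartition("foo", 0),
                new TopicPartition("foo", 1),
                new TopicPartition("bar", 2));
        // Group partition ids under their topic name, mirroring groupDataByTopic.
        Map<String, List<Integer>> byTopic = partitions.stream()
                .collect(Collectors.groupingBy(TopicPartition::topic,
                        Collectors.mapping(TopicPartition::partition, Collectors.toList())));
        System.out.println(byTopic); // e.g. {bar=[2], foo=[0, 1]} (map order not guaranteed)
    }
}

Like the hand-rolled loop above, groupingBy collects into a HashMap by default, so the iteration order of topics is unspecified.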
Use of org.apache.kafka.common.TopicPartition in project kafka by apache.
The class ConsumerProtocolTest, method serializeDeserializeAssignment.
@Test
public void serializeDeserializeAssignment() {
    List<TopicPartition> partitions = Arrays.asList(new TopicPartition("foo", 0), new TopicPartition("bar", 2));
    ByteBuffer buffer = ConsumerProtocol.serializeAssignment(new PartitionAssignor.Assignment(partitions));
    PartitionAssignor.Assignment parsedAssignment = ConsumerProtocol.deserializeAssignment(buffer);
    assertEquals(toSet(partitions), toSet(parsedAssignment.partitions()));
}
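The toSet helper in the assertion is a small test utility that copies a list into a HashSet so the comparison ignores element order. A minimal sketch of such a helper (the class name and placement are assumptions, not from the source):

import java.util.Arrays;
import java.util.Collection;
import java.util.HashSet;
import java.util.Set;

public class SetHelper {
    // Copies any collection into a HashSet so that equality checks
    // ignore element order, as the round-trip assertion above requires.
    static <T> Set<T> toSet(Collection<T> collection) {
        return new HashSet<>(collection);
    }

    public static void main(String[] args) {
        System.out.println(toSet(Arrays.asList("a", "b", "b"))); // [a, b]
    }
}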
Use of org.apache.kafka.common.TopicPartition in project kafka by apache.
The class FetcherTest, method testGetOffsetsForTimesWithError.
private void testGetOffsetsForTimesWithError(Errors errorForTp0, Errors errorForTp1,
                                             long offsetForTp0, long offsetForTp1,
                                             Long expectedOffsetForTp0, Long expectedOffsetForTp1) {
    client.reset();
    TopicPartition tp0 = tp;
    TopicPartition tp1 = new TopicPartition(topicName, 1);
    // Ensure metadata has both partitions.
    Cluster cluster = TestUtils.clusterWith(2, topicName, 2);
    metadata.update(cluster, Collections.<String>emptySet(), time.milliseconds());
    // First try should fail due to metadata error.
    client.prepareResponseFrom(listOffsetResponse(tp0, errorForTp0, offsetForTp0, offsetForTp0), cluster.leaderFor(tp0));
    client.prepareResponseFrom(listOffsetResponse(tp1, errorForTp1, offsetForTp1, offsetForTp1), cluster.leaderFor(tp1));
    // Second try should succeed.
    client.prepareResponseFrom(listOffsetResponse(tp0, Errors.NONE, offsetForTp0, offsetForTp0), cluster.leaderFor(tp0));
    client.prepareResponseFrom(listOffsetResponse(tp1, Errors.NONE, offsetForTp1, offsetForTp1), cluster.leaderFor(tp1));
    Map<TopicPartition, Long> timestampToSearch = new HashMap<>();
    timestampToSearch.put(tp0, 0L);
    timestampToSearch.put(tp1, 0L);
    Map<TopicPartition, OffsetAndTimestamp> offsetAndTimestampMap = fetcher.getOffsetsByTimes(timestampToSearch, Long.MAX_VALUE);
    if (expectedOffsetForTp0 == null) {
        assertNull(offsetAndTimestampMap.get(tp0));
    } else {
        assertEquals(expectedOffsetForTp0.longValue(), offsetAndTimestampMap.get(tp0).timestamp());
        assertEquals(expectedOffsetForTp0.longValue(), offsetAndTimestampMap.get(tp0).offset());
    }
    if (expectedOffsetForTp1 == null) {
        assertNull(offsetAndTimestampMap.get(tp1));
    } else {
        assertEquals(expectedOffsetForTp1.longValue(), offsetAndTimestampMap.get(tp1).timestamp());
        assertEquals(expectedOffsetForTp1.longValue(), offsetAndTimestampMap.get(tp1).offset());
    }
}
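The Fetcher.getOffsetsByTimes method under test sits behind the public KafkaConsumer#offsetsForTimes API. A minimal sketch of how an application would call it (broker address, topic name, and deserializer choice are assumptions for illustration):

import java.util.Collections;
import java.util.Map;
import java.util.Properties;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.clients.consumer.OffsetAndTimestamp;
import org.apache.kafka.common.TopicPartition;

public class OffsetsForTimesExample {
    public static void main(String[] args) {
        Properties props = new Properties();
        props.put("bootstrap.servers", "localhost:9092"); // assumed broker address
        props.put("key.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
        props.put("value.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
        try (KafkaConsumer<String, String> consumer = new KafkaConsumer<>(props)) {
            TopicPartition tp = new TopicPartition("my-topic", 0); // assumed topic
            // Look up the earliest offset whose timestamp is >= 0L for the partition.
            Map<TopicPartition, OffsetAndTimestamp> result =
                    consumer.offsetsForTimes(Collections.singletonMap(tp, 0L));
            OffsetAndTimestamp ot = result.get(tp);
            if (ot != null)
                System.out.printf("offset=%d timestamp=%d%n", ot.offset(), ot.timestamp());
        }
    }
}

As in the test's assertions, a null value for a partition means no offset with a large enough timestamp was found.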
Use of org.apache.kafka.common.TopicPartition in project kafka by apache.
The class ConsumerCoordinatorTest, method testUpdateMetadataDuringRebalance.
@Test
public void testUpdateMetadataDuringRebalance() {
    final String topic1 = "topic1";
    final String topic2 = "topic2";
    TopicPartition tp1 = new TopicPartition(topic1, 0);
    TopicPartition tp2 = new TopicPartition(topic2, 0);
    final String consumerId = "leader";
    List<String> topics = Arrays.asList(topic1, topic2);
    subscriptions.subscribe(new HashSet<>(topics), rebalanceListener);
    metadata.setTopics(topics);
    // we only have metadata for one topic initially
    metadata.update(TestUtils.singletonCluster(topic1, 1), Collections.<String>emptySet(), time.milliseconds());
    client.prepareResponse(groupCoordinatorResponse(node, Errors.NONE));
    coordinator.ensureCoordinatorReady();
    // prepare initial rebalance
    Map<String, List<String>> memberSubscriptions = Collections.singletonMap(consumerId, topics);
    partitionAssignor.prepare(Collections.singletonMap(consumerId, Collections.singletonList(tp1)));
    client.prepareResponse(joinGroupLeaderResponse(1, consumerId, memberSubscriptions, Errors.NONE));
    client.prepareResponse(new MockClient.RequestMatcher() {
        @Override
        public boolean matches(AbstractRequest body) {
            SyncGroupRequest sync = (SyncGroupRequest) body;
            if (sync.memberId().equals(consumerId) && sync.generationId() == 1 && sync.groupAssignment().containsKey(consumerId)) {
                // trigger the metadata update including both topics after the sync group request has been sent
                Map<String, Integer> topicPartitionCounts = new HashMap<>();
                topicPartitionCounts.put(topic1, 1);
                topicPartitionCounts.put(topic2, 1);
                metadata.update(TestUtils.singletonCluster(topicPartitionCounts), Collections.<String>emptySet(), time.milliseconds());
                return true;
            }
            return false;
        }
    }, syncGroupResponse(Collections.singletonList(tp1), Errors.NONE));
    // the metadata update should trigger a second rebalance
    client.prepareResponse(joinGroupLeaderResponse(2, consumerId, memberSubscriptions, Errors.NONE));
    client.prepareResponse(syncGroupResponse(Arrays.asList(tp1, tp2), Errors.NONE));
    coordinator.poll(time.milliseconds());
    assertFalse(coordinator.needRejoin());
    assertEquals(new HashSet<>(Arrays.asList(tp1, tp2)), subscriptions.assignedPartitions());
}
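From the application's point of view, the reassignment produced by the metadata-triggered second rebalance is delivered through the ConsumerRebalanceListener registered at subscribe time (the rebalanceListener in the test). A minimal sketch of such a listener (the class name and logging are illustrative):

import java.util.Collection;
import org.apache.kafka.clients.consumer.ConsumerRebalanceListener;
import org.apache.kafka.common.TopicPartition;

public class LoggingRebalanceListener implements ConsumerRebalanceListener {
    @Override
    public void onPartitionsRevoked(Collection<TopicPartition> partitions) {
        // Called before a rebalance takes away the current assignment.
        System.out.println("Revoked: " + partitions);
    }

    @Override
    public void onPartitionsAssigned(Collection<TopicPartition> partitions) {
        // Called with the new assignment; after the second rebalance in the
        // test above this would contain both topic1-0 and topic2-0.
        System.out.println("Assigned: " + partitions);
    }
}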
Use of org.apache.kafka.common.TopicPartition in project kafka by apache.
The class ConsumerInterceptorsTest, method testOnCommitChain.
@Test
public void testOnCommitChain() {
    List<ConsumerInterceptor<Integer, Integer>> interceptorList = new ArrayList<>();
    // We are testing two different interceptors by configuring the same interceptor class differently,
    // which is not how it would be done in KafkaConsumer, but is fine for testing interceptor callbacks.
    FilterConsumerInterceptor<Integer, Integer> interceptor1 = new FilterConsumerInterceptor<>(filterPartition1);
    FilterConsumerInterceptor<Integer, Integer> interceptor2 = new FilterConsumerInterceptor<>(filterPartition2);
    interceptorList.add(interceptor1);
    interceptorList.add(interceptor2);
    ConsumerInterceptors<Integer, Integer> interceptors = new ConsumerInterceptors<>(interceptorList);
    // Verify that onCommit is called for all interceptors in the chain.
    Map<TopicPartition, OffsetAndMetadata> offsets = new HashMap<>();
    offsets.put(tp, new OffsetAndMetadata(0));
    interceptors.onCommit(offsets);
    assertEquals(2, onCommitCount);
    // Verify that even if one interceptor throws an exception, every interceptor's onCommit is still called.
    interceptor1.injectOnCommitError(true);
    interceptors.onCommit(offsets);
    assertEquals(4, onCommitCount);
    interceptors.close();
}
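FilterConsumerInterceptor is a test fixture; a minimal stand-alone ConsumerInterceptor that would participate in the same onCommit chain might look like this (the class name and logging are illustrative):

import java.util.Map;
import org.apache.kafka.clients.consumer.ConsumerInterceptor;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.OffsetAndMetadata;
import org.apache.kafka.common.TopicPartition;

public class LoggingConsumerInterceptor<K, V> implements ConsumerInterceptor<K, V> {
    @Override
    public void configure(Map<String, ?> configs) {
        // No configuration needed for this sketch.
    }

    @Override
    public ConsumerRecords<K, V> onConsume(ConsumerRecords<K, V> records) {
        return records; // pass records through unchanged
    }

    @Override
    public void onCommit(Map<TopicPartition, OffsetAndMetadata> offsets) {
        for (Map.Entry<TopicPartition, OffsetAndMetadata> e : offsets.entrySet())
            System.out.printf("committed %s -> %d%n", e.getKey(), e.getValue().offset());
    }

    @Override
    public void close() {
    }
}

The chain's tolerance of failures, verified via injectOnCommitError above, comes from ConsumerInterceptors catching and logging exceptions thrown by each interceptor instead of letting them propagate.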