Example usage of org.apache.kafka.common.TopicPartition in the Apache Kafka project:
the toStruct method of class LeaderAndIsrResponse.
@Override
protected Struct toStruct(short version) {
    // Serialize this LeaderAndIsr response into the wire-format struct for the given version.
    Struct struct = new Struct(ApiKeys.LEADER_AND_ISR.responseSchema(version));
    List<Struct> partitionStructs = new ArrayList<>(responses.size());
    for (Map.Entry<TopicPartition, Errors> entry : responses.entrySet()) {
        TopicPartition tp = entry.getKey();
        // One nested struct per partition: topic, partition id, and its error code.
        Struct partitionStruct = struct.instance(PARTITIONS_KEY_NAME);
        partitionStruct.set(PARTITIONS_TOPIC_KEY_NAME, tp.topic());
        partitionStruct.set(PARTITIONS_PARTITION_KEY_NAME, tp.partition());
        partitionStruct.set(PARTITIONS_ERROR_CODE_KEY_NAME, entry.getValue().code());
        partitionStructs.add(partitionStruct);
    }
    struct.set(PARTITIONS_KEY_NAME, partitionStructs.toArray());
    // Top-level error code for the response as a whole.
    struct.set(ERROR_CODE_KEY_NAME, error.code());
    return struct;
}
Example usage of org.apache.kafka.common.TopicPartition in the Apache Kafka project:
the toStruct method of class ListOffsetRequest.
@Override
protected Struct toStruct() {
    short version = version();
    Struct struct = new Struct(ApiKeys.LIST_OFFSETS.requestSchema(version));
    // v0 requests carry PartitionData (timestamp + maxNumOffsets); v1+ carry bare timestamps.
    Map<TopicPartition, ?> targetTimes = partitionTimestamps == null ? offsetData : partitionTimestamps;
    Map<String, Map<Integer, Object>> topicsData = CollectionUtils.groupDataByTopic(targetTimes);
    struct.set(REPLICA_ID_KEY_NAME, replicaId);
    List<Struct> topicArray = new ArrayList<>();
    for (Map.Entry<String, Map<Integer, Object>> topicEntry : topicsData.entrySet()) {
        Struct topicData = struct.instance(TOPICS_KEY_NAME);
        topicData.set(TOPIC_KEY_NAME, topicEntry.getKey());
        List<Struct> partitionArray = new ArrayList<>();
        for (Map.Entry<Integer, Object> partitionEntry : topicEntry.getValue().entrySet()) {
            // Common fields for every version, then the version-specific payload.
            Struct partitionData = topicData.instance(PARTITIONS_KEY_NAME);
            partitionData.set(PARTITION_KEY_NAME, partitionEntry.getKey());
            if (version == 0) {
                PartitionData offsetPartitionData = (PartitionData) partitionEntry.getValue();
                partitionData.set(TIMESTAMP_KEY_NAME, offsetPartitionData.timestamp);
                partitionData.set(MAX_NUM_OFFSETS_KEY_NAME, offsetPartitionData.maxNumOffsets);
            } else {
                partitionData.set(TIMESTAMP_KEY_NAME, (Long) partitionEntry.getValue());
            }
            partitionArray.add(partitionData);
        }
        topicData.set(PARTITIONS_KEY_NAME, partitionArray.toArray());
        topicArray.add(topicData);
    }
    struct.set(TOPICS_KEY_NAME, topicArray.toArray());
    return struct;
}
Example usage of org.apache.kafka.common.TopicPartition in the Apache Kafka project:
the prepareOffsetCommitResponse method of class KafkaConsumerTest.
/**
 * Primes the mock client with an OffsetCommit response from {@code coordinator} that
 * answers {@code Errors.NONE} for every partition in {@code partitionOffsets}, and
 * returns a flag that remains {@code true} only if the consumer's commit request
 * carried exactly the expected offset for each expected partition.
 *
 * @param client           mock network client to prime
 * @param coordinator      node the commit response will appear to come from
 * @param partitionOffsets expected partition-to-offset pairs in the commit request
 * @return flag the caller checks after polling to confirm the commit was observed
 */
private AtomicBoolean prepareOffsetCommitResponse(MockClient client, Node coordinator, final Map<TopicPartition, Long> partitionOffsets) {
    // Starts true; flipped to false as soon as any expected offset is missing or wrong.
    final AtomicBoolean commitReceived = new AtomicBoolean(true);
    Map<TopicPartition, Errors> response = new HashMap<>();
    for (TopicPartition partition : partitionOffsets.keySet())
        response.put(partition, Errors.NONE);
    client.prepareResponseFrom(new MockClient.RequestMatcher() {
        @Override
        public boolean matches(AbstractRequest body) {
            OffsetCommitRequest commitRequest = (OffsetCommitRequest) body;
            for (Map.Entry<TopicPartition, Long> partitionOffset : partitionOffsets.entrySet()) {
                OffsetCommitRequest.PartitionData partitionData = commitRequest.offsetData().get(partitionOffset.getKey());
                // Null guard: a commit request that omits an expected partition previously
                // threw an NPE inside the matcher; treat it as a mismatch instead.
                if (partitionData == null || partitionData.offset != partitionOffset.getValue()) {
                    commitReceived.set(false);
                    return false;
                }
            }
            return true;
        }
    }, offsetCommitResponse(response), coordinator);
    return commitReceived;
}
Example usage of org.apache.kafka.common.TopicPartition in the Apache Kafka project:
the fetchResponseWithUnexpectedPartitionIsIgnored test of class KafkaConsumerTest.
@Test
public void fetchResponseWithUnexpectedPartitionIsIgnored() {
// Verifies that records for a partition the consumer was never assigned (t2p0)
// are silently dropped from a fetch response rather than surfaced by poll().
int rebalanceTimeoutMs = 60000;
int sessionTimeoutMs = 30000;
int heartbeatIntervalMs = 3000;
// adjust auto commit interval lower than heartbeat so we don't need to deal with
// a concurrent heartbeat request
int autoCommitIntervalMs = 1000;
Time time = new MockTime();
// Single-node cluster with one partition of `topic`; t2p0 is deliberately NOT part of it.
Cluster cluster = TestUtils.singletonCluster(singletonMap(topic, 1));
Node node = cluster.nodes().get(0);
Metadata metadata = new Metadata(0, Long.MAX_VALUE);
metadata.update(cluster, Collections.<String>emptySet(), time.milliseconds());
MockClient client = new MockClient(time, metadata);
client.setNode(node);
PartitionAssignor assignor = new RangeAssignor();
final KafkaConsumer<String, String> consumer = newConsumer(time, client, metadata, assignor, rebalanceTimeoutMs, sessionTimeoutMs, heartbeatIntervalMs, true, autoCommitIntervalMs);
consumer.subscribe(singletonList(topic), getConsumerRebalanceListener(consumer));
// Mock rebalance so the consumer ends up assigned only tp0.
prepareRebalance(client, node, assignor, singletonList(tp0), null);
Map<TopicPartition, FetchInfo> fetches1 = new HashMap<>();
fetches1.put(tp0, new FetchInfo(0, 1));
// not assigned and not fetched
fetches1.put(t2p0, new FetchInfo(0, 10));
client.prepareResponseFrom(fetchResponse(fetches1), node);
ConsumerRecords<String, String> records = consumer.poll(0);
// The first poll drives the rebalance; the stray t2p0 data must not leak through,
// so no records are returned here.
assertEquals(0, records.count());
}
Example usage of org.apache.kafka.common.TopicPartition in the Apache Kafka project:
the testSubscriptionChangesWithAutoCommitEnabled test of class KafkaConsumerTest.
/**
 * Verify that when a consumer changes its topic subscription its assigned partitions
 * do not immediately change, and the latest consumed offsets of its to-be-revoked
 * partitions are properly committed (when auto-commit is enabled).
 * Upon unsubscribing from subscribed topics the consumer subscription and assignment
 * are both updated right away but its consumed offsets are not auto committed.
 */
@Test
public void testSubscriptionChangesWithAutoCommitEnabled() {
    int rebalanceTimeoutMs = 60000;
    int sessionTimeoutMs = 30000;
    int heartbeatIntervalMs = 3000;
    // adjust auto commit interval lower than heartbeat so we don't need to deal with
    // a concurrent heartbeat request
    int autoCommitIntervalMs = 1000;
    Time time = new MockTime();
    // Three single-partition topics; the test moves the subscription from {topic, topic2} to {topic, topic3}.
    Map<String, Integer> tpCounts = new HashMap<>();
    tpCounts.put(topic, 1);
    tpCounts.put(topic2, 1);
    tpCounts.put(topic3, 1);
    Cluster cluster = TestUtils.singletonCluster(tpCounts);
    Node node = cluster.nodes().get(0);
    Metadata metadata = new Metadata(0, Long.MAX_VALUE);
    metadata.update(cluster, Collections.<String>emptySet(), time.milliseconds());
    MockClient client = new MockClient(time, metadata);
    client.setNode(node);
    PartitionAssignor assignor = new RangeAssignor();
    final KafkaConsumer<String, String> consumer = newConsumer(time, client, metadata, assignor, rebalanceTimeoutMs, sessionTimeoutMs, heartbeatIntervalMs, true, autoCommitIntervalMs);
    // initial subscription
    consumer.subscribe(Arrays.asList(topic, topic2), getConsumerRebalanceListener(consumer));
    // verify that subscription has changed but assignment is still unchanged
    // (assertEquals/split asserts give informative failure messages, unlike assertTrue on a boolean expression)
    assertEquals(2, consumer.subscription().size());
    assertTrue(consumer.subscription().contains(topic));
    assertTrue(consumer.subscription().contains(topic2));
    assertTrue(consumer.assignment().isEmpty());
    // mock rebalance responses
    Node coordinator = prepareRebalance(client, node, assignor, Arrays.asList(tp0, t2p0), null);
    consumer.poll(0);
    // verify that subscription is still the same, and now assignment has caught up
    assertEquals(2, consumer.subscription().size());
    assertTrue(consumer.subscription().contains(topic));
    assertTrue(consumer.subscription().contains(topic2));
    assertEquals(2, consumer.assignment().size());
    assertTrue(consumer.assignment().contains(tp0));
    assertTrue(consumer.assignment().contains(t2p0));
    // mock a response to the outstanding fetch so that we have data available on the next poll
    Map<TopicPartition, FetchInfo> fetches1 = new HashMap<>();
    fetches1.put(tp0, new FetchInfo(0, 1));
    fetches1.put(t2p0, new FetchInfo(0, 10));
    client.respondFrom(fetchResponse(fetches1), node);
    client.poll(0, time.milliseconds());
    ConsumerRecords<String, String> records = consumer.poll(0);
    // clear out the prefetch so it doesn't interfere with the rest of the test
    fetches1.put(tp0, new FetchInfo(1, 0));
    fetches1.put(t2p0, new FetchInfo(10, 0));
    client.respondFrom(fetchResponse(fetches1), node);
    client.poll(0, time.milliseconds());
    // verify that the fetch occurred as expected
    assertEquals(11, records.count());
    assertEquals(1L, consumer.position(tp0));
    assertEquals(10L, consumer.position(t2p0));
    // subscription change
    consumer.subscribe(Arrays.asList(topic, topic3), getConsumerRebalanceListener(consumer));
    // verify that subscription has changed but assignment is still unchanged
    assertEquals(2, consumer.subscription().size());
    assertTrue(consumer.subscription().contains(topic));
    assertTrue(consumer.subscription().contains(topic3));
    assertEquals(2, consumer.assignment().size());
    assertTrue(consumer.assignment().contains(tp0));
    assertTrue(consumer.assignment().contains(t2p0));
    // mock the offset commit response for to be revoked partitions
    Map<TopicPartition, Long> partitionOffsets1 = new HashMap<>();
    partitionOffsets1.put(tp0, 1L);
    partitionOffsets1.put(t2p0, 10L);
    AtomicBoolean commitReceived = prepareOffsetCommitResponse(client, coordinator, partitionOffsets1);
    // mock rebalance responses
    prepareRebalance(client, node, assignor, Arrays.asList(tp0, t3p0), coordinator);
    // mock a response to the next fetch from the new assignment
    Map<TopicPartition, FetchInfo> fetches2 = new HashMap<>();
    fetches2.put(tp0, new FetchInfo(1, 1));
    fetches2.put(t3p0, new FetchInfo(0, 100));
    client.prepareResponse(fetchResponse(fetches2));
    records = consumer.poll(0);
    // verify that the fetch occurred as expected
    assertEquals(101, records.count());
    assertEquals(2L, consumer.position(tp0));
    assertEquals(100L, consumer.position(t3p0));
    // verify that the offset commits occurred as expected
    assertTrue(commitReceived.get());
    // verify that subscription is still the same, and now assignment has caught up
    assertEquals(2, consumer.subscription().size());
    assertTrue(consumer.subscription().contains(topic));
    assertTrue(consumer.subscription().contains(topic3));
    assertEquals(2, consumer.assignment().size());
    assertTrue(consumer.assignment().contains(tp0));
    assertTrue(consumer.assignment().contains(t3p0));
    consumer.unsubscribe();
    // verify that subscription and assignment are both cleared
    assertTrue(consumer.subscription().isEmpty());
    assertTrue(consumer.assignment().isEmpty());
    client.requests().clear();
    consumer.close();
}
Aggregations