Use of org.apache.kafka.common.TopicPartition in project kafka by Apache.
The class LeaderAndIsrResponse, method toStruct.
@Override
protected Struct toStruct(short version) {
    Struct struct = new Struct(ApiKeys.LEADER_AND_ISR.responseSchema(version));
    List<Struct> responseDatas = new ArrayList<>(responses.size());
    // One struct per partition, carrying the topic, partition id, and error code.
    for (Map.Entry<TopicPartition, Errors> response : responses.entrySet()) {
        Struct partitionData = struct.instance(PARTITIONS_KEY_NAME);
        TopicPartition partition = response.getKey();
        partitionData.set(PARTITIONS_TOPIC_KEY_NAME, partition.topic());
        partitionData.set(PARTITIONS_PARTITION_KEY_NAME, partition.partition());
        partitionData.set(PARTITIONS_ERROR_CODE_KEY_NAME, response.getValue().code());
        responseDatas.add(partitionData);
    }
    struct.set(PARTITIONS_KEY_NAME, responseDatas.toArray());
    // Top-level error code for the response as a whole.
    struct.set(ERROR_CODE_KEY_NAME, error.code());
    return struct;
}
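For orientation, a minimal sketch (not from the Kafka source; the topic name and error values are illustrative) of the responses map that toStruct serializes:

Map<TopicPartition, Errors> responses = new HashMap<>();
responses.put(new TopicPartition("orders", 0), Errors.NONE);
responses.put(new TopicPartition("orders", 1), Errors.NOT_LEADER_FOR_PARTITION);
// toStruct(version) then emits one partition struct per entry: topic, partition id, error code.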
Use of org.apache.kafka.common.TopicPartition in project kafka by Apache.
The class ListOffsetRequest, method toStruct.
@Override
protected Struct toStruct() {
    short version = version();
    Struct struct = new Struct(ApiKeys.LIST_OFFSETS.requestSchema(version));
    // v0 carries PartitionData (timestamp + maxNumOffsets); later versions carry a bare timestamp.
    Map<TopicPartition, ?> targetTimes = partitionTimestamps == null ? offsetData : partitionTimestamps;
    Map<String, Map<Integer, Object>> topicsData = CollectionUtils.groupDataByTopic(targetTimes);
    struct.set(REPLICA_ID_KEY_NAME, replicaId);
    List<Struct> topicArray = new ArrayList<>();
    for (Map.Entry<String, Map<Integer, Object>> topicEntry : topicsData.entrySet()) {
        Struct topicData = struct.instance(TOPICS_KEY_NAME);
        topicData.set(TOPIC_KEY_NAME, topicEntry.getKey());
        List<Struct> partitionArray = new ArrayList<>();
        for (Map.Entry<Integer, Object> partitionEntry : topicEntry.getValue().entrySet()) {
            if (version == 0) {
                PartitionData offsetPartitionData = (PartitionData) partitionEntry.getValue();
                Struct partitionData = topicData.instance(PARTITIONS_KEY_NAME);
                partitionData.set(PARTITION_KEY_NAME, partitionEntry.getKey());
                partitionData.set(TIMESTAMP_KEY_NAME, offsetPartitionData.timestamp);
                partitionData.set(MAX_NUM_OFFSETS_KEY_NAME, offsetPartitionData.maxNumOffsets);
                partitionArray.add(partitionData);
            } else {
                Long timestamp = (Long) partitionEntry.getValue();
                Struct partitionData = topicData.instance(PARTITIONS_KEY_NAME);
                partitionData.set(PARTITION_KEY_NAME, partitionEntry.getKey());
                partitionData.set(TIMESTAMP_KEY_NAME, timestamp);
                partitionArray.add(partitionData);
            }
        }
        topicData.set(PARTITIONS_KEY_NAME, partitionArray.toArray());
        topicArray.add(topicData);
    }
    struct.set(TOPICS_KEY_NAME, topicArray.toArray());
    return struct;
}
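For the post-v0 branch, a hedged usage sketch (topic names invented; -2L and -1L are the standard earliest/latest sentinel timestamps, matching ListOffsetRequest's EARLIEST_TIMESTAMP and LATEST_TIMESTAMP constants):

Map<TopicPartition, Long> partitionTimestamps = new HashMap<>();
partitionTimestamps.put(new TopicPartition("orders", 0), -2L); // earliest available offset
partitionTimestamps.put(new TopicPartition("orders", 1), -1L); // latest offset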
Use of org.apache.kafka.common.TopicPartition in project kafka by Apache.
The class CollectionUtils, method groupDataByTopic.
/**
 * Group a list of partitions by topic name.
 * @param partitions the partitions to group
 * @return a map from topic name to the ids of that topic's partitions
 */
public static Map<String, List<Integer>> groupDataByTopic(List<TopicPartition> partitions) {
    Map<String, List<Integer>> partitionsByTopic = new HashMap<>();
    for (TopicPartition tp : partitions) {
        String topic = tp.topic();
        List<Integer> topicData = partitionsByTopic.get(topic);
        if (topicData == null) {
            topicData = new ArrayList<>();
            partitionsByTopic.put(topic, topicData);
        }
        topicData.add(tp.partition());
    }
    return partitionsByTopic;
}
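A short usage example (topic names invented) showing the grouping this method performs:

List<TopicPartition> partitions = Arrays.asList(
        new TopicPartition("orders", 0),
        new TopicPartition("orders", 1),
        new TopicPartition("payments", 0));
Map<String, List<Integer>> byTopic = CollectionUtils.groupDataByTopic(partitions);
// byTopic now maps "orders" -> [0, 1] and "payments" -> [0]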
Use of org.apache.kafka.common.TopicPartition in project kafka by Apache.
The class KafkaConsumerTest, method testSeekNegative.
@Test(expected = IllegalArgumentException.class)
public void testSeekNegative() {
    Properties props = new Properties();
    props.setProperty(ConsumerConfig.CLIENT_ID_CONFIG, "testSeekNegative");
    props.setProperty(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9999");
    props.setProperty(ConsumerConfig.METRIC_REPORTER_CLASSES_CONFIG, MockMetricsReporter.class.getName());
    KafkaConsumer<byte[], byte[]> consumer = newConsumer(props); // pass props so the config above takes effect
    try {
        consumer.assign(Arrays.asList(new TopicPartition("nonExistTopic", 0)));
        // Seeking to a negative offset must throw IllegalArgumentException.
        consumer.seek(new TopicPartition("nonExistTopic", 0), -1);
    } finally {
        consumer.close();
    }
}
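For contrast, a hypothetical companion check (not part of the Kafka test suite) showing that the smallest valid seek target is offset 0:

// Offsets are zero-based, so seeking to 0 is valid and does not throw.
consumer.assign(Arrays.asList(new TopicPartition("nonExistTopic", 0)));
consumer.seek(new TopicPartition("nonExistTopic", 0), 0);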
Use of org.apache.kafka.common.TopicPartition in project kafka by Apache.
The class WorkerSinkTask, method commitOffsets.
private void commitOffsets(long now, boolean closing) {
    if (currentOffsets.isEmpty())
        return;
    committing = true;
    commitSeqno += 1;
    commitStarted = now;
    final Map<TopicPartition, OffsetAndMetadata> taskProvidedOffsets;
    try {
        // Give the task a chance to adjust the offsets before they are committed.
        taskProvidedOffsets = task.preCommit(new HashMap<>(currentOffsets));
    } catch (Throwable t) {
        if (closing) {
            log.warn("{} Offset commit failed during close", this);
            onCommitCompleted(t, commitSeqno);
        } else {
            log.error("{} Offset commit failed, rewinding to last committed offsets", this, t);
            for (Map.Entry<TopicPartition, OffsetAndMetadata> entry : lastCommittedOffsets.entrySet()) {
                log.debug("{} Rewinding topic partition {} to offset {}", id, entry.getKey(), entry.getValue().offset());
                consumer.seek(entry.getKey(), entry.getValue().offset());
            }
            currentOffsets = new HashMap<>(lastCommittedOffsets);
            onCommitCompleted(t, commitSeqno);
        }
        return;
    } finally {
        // Close the task if needed before committing the offsets.
        if (closing)
            task.close(currentOffsets.keySet());
    }
    if (taskProvidedOffsets.isEmpty()) {
        log.debug("{} Skipping offset commit, task opted-out", this);
        onCommitCompleted(null, commitSeqno);
        return;
    }
    final Map<TopicPartition, OffsetAndMetadata> commitableOffsets = new HashMap<>(lastCommittedOffsets);
    for (Map.Entry<TopicPartition, OffsetAndMetadata> taskProvidedOffsetEntry : taskProvidedOffsets.entrySet()) {
        final TopicPartition partition = taskProvidedOffsetEntry.getKey();
        final OffsetAndMetadata taskProvidedOffset = taskProvidedOffsetEntry.getValue();
        if (commitableOffsets.containsKey(partition)) {
            // Only accept offsets that do not run ahead of what was actually consumed.
            if (taskProvidedOffset.offset() <= currentOffsets.get(partition).offset()) {
                commitableOffsets.put(partition, taskProvidedOffset);
            } else {
                log.warn("Ignoring invalid task provided offset {}/{} -- not yet consumed", partition, taskProvidedOffset);
            }
        } else {
            log.warn("Ignoring invalid task provided offset {}/{} -- partition not assigned", partition, taskProvidedOffset);
        }
    }
    if (commitableOffsets.equals(lastCommittedOffsets)) {
        log.debug("{} Skipping offset commit, no change since last commit", this);
        onCommitCompleted(null, commitSeqno);
        return;
    }
    log.trace("{} Offsets to commit: {}", this, commitableOffsets);
    doCommit(commitableOffsets, closing, commitSeqno);
}
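The task.preCommit hook invoked above is what lets a connector adjust the offsets before they are committed. A minimal sketch of an override inside a SinkTask subclass, mirroring the default behavior (flush, then commit everything the framework tracked):

@Override
public Map<TopicPartition, OffsetAndMetadata> preCommit(Map<TopicPartition, OffsetAndMetadata> currentOffsets) {
    flush(currentOffsets);     // write out any buffered records first
    return currentOffsets;     // commit exactly the offsets the framework tracked
}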