Use of io.streamnative.pulsar.handlers.kop.coordinator.group.GroupMetadata.CommitRecordMetadataAndOffset in project starlight-for-kafka by DataStax.
From class GroupMetadataManagerTest, method testLoadTransactionalOffsetCommitsFromMultipleProducers:
@Test
public void testLoadTransactionalOffsetCommitsFromMultipleProducers() throws Exception {
    long firstProducerId = 1000L;
    short firstProducerEpoch = 2;
    long secondProducerId = 1001L;
    short secondProducerEpoch = 3;

    Map<TopicPartition, Long> committedOffsetsFirstProducer = new HashMap<>();
    committedOffsetsFirstProducer.put(new TopicPartition("foo", 0), 23L);
    committedOffsetsFirstProducer.put(new TopicPartition("foo", 1), 455L);
    committedOffsetsFirstProducer.put(new TopicPartition("bar", 0), 8992L);

    Map<TopicPartition, Long> committedOffsetsSecondProducer = new HashMap<>();
    committedOffsetsSecondProducer.put(new TopicPartition("foo", 2), 231L);
    committedOffsetsSecondProducer.put(new TopicPartition("foo", 3), 4551L);
    committedOffsetsSecondProducer.put(new TopicPartition("bar", 1), 89921L);

    ByteBuffer buffer = ByteBuffer.allocate(1024);
    int nextOffset = 0;
    int firstProduceRecordOffset = nextOffset;
    nextOffset += appendTransactionalOffsetCommits(buffer, firstProducerId, firstProducerEpoch,
        nextOffset, committedOffsetsFirstProducer, NAMESPACE_PREFIX);
    nextOffset += completeTransactionalOffsetCommit(buffer, firstProducerId, firstProducerEpoch,
        nextOffset, true);
    int secondProduceRecordOffset = nextOffset;
    nextOffset += appendTransactionalOffsetCommits(buffer, secondProducerId, secondProducerEpoch,
        nextOffset, committedOffsetsSecondProducer, NAMESPACE_PREFIX);
    nextOffset += completeTransactionalOffsetCommit(buffer, secondProducerId, secondProducerEpoch,
        nextOffset, true);
    buffer.flip();

    byte[] key = groupMetadataKey(groupId);
    Producer<ByteBuffer> producer = groupMetadataManager.getOffsetsTopicProducer(groupPartitionId).get();
    producer.newMessage()
        .keyBytes(key)
        .value(buffer)
        .eventTime(Time.SYSTEM.milliseconds())
        .send();

    CompletableFuture<GroupMetadata> onLoadedFuture = new CompletableFuture<>();
    groupMetadataManager.scheduleLoadGroupAndOffsets(groupPartitionId, onLoadedFuture::complete).get();
    GroupMetadata group = onLoadedFuture.get();
    GroupMetadata groupInCache = groupMetadataManager.getGroup(groupId).orElseGet(() -> {
        fail("Group was not loaded into the cache");
        return null;
    });
    assertSame(group, groupInCache);
    assertEquals(groupId, group.groupId());
    assertEquals(Empty, group.currentState());

    // Ensure that only the committed offsets are materialized, and that there are no pending commits
    // for the producer. This allows us to be certain that the aborted offset commits are truly discarded.
    assertEquals(committedOffsetsFirstProducer.size() + committedOffsetsSecondProducer.size(),
        group.allOffsets().size());
    committedOffsetsFirstProducer.forEach((tp, offset) -> {
        assertEquals(Optional.of(offset), group.offset(tp, NAMESPACE_PREFIX).map(OffsetAndMetadata::offset));
        assertEquals(Optional.of((long) firstProduceRecordOffset),
            group.offsetWithRecordMetadata(tp).flatMap(CommitRecordMetadataAndOffset::appendedBatchOffset));
    });
    committedOffsetsSecondProducer.forEach((tp, offset) -> {
        assertEquals(Optional.of(offset), group.offset(tp, NAMESPACE_PREFIX).map(OffsetAndMetadata::offset));
        assertEquals(Optional.of((long) secondProduceRecordOffset),
            group.offsetWithRecordMetadata(tp).flatMap(CommitRecordMetadataAndOffset::appendedBatchOffset));
    });
}
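The final assertions rely on CommitRecordMetadataAndOffset pairing a committed consumer offset with the base offset of the log batch that carried its commit record. As orientation, here is a hedged sketch of what that nested value class plausibly looks like, inferred only from the call sites in this section; the OffsetAndMetadata stand-in and the orElse(-1L) fallback are assumptions, not the project's code.

import java.util.Optional;

// Hedged sketch of GroupMetadata.CommitRecordMetadataAndOffset, inferred from
// the usages above; the real class may differ in detail.
final class CommitRecordMetadataAndOffsetSketch {

    // Stand-in for the project's OffsetAndMetadata; only the offset is modeled here.
    record OffsetAndMetadata(long offset) { }

    private final Optional<Long> appendedBatchOffset;  // base offset of the batch that carried the commit record
    private final OffsetAndMetadata offsetAndMetadata; // the committed consumer offset itself

    CommitRecordMetadataAndOffsetSketch(Optional<Long> appendedBatchOffset,
                                        OffsetAndMetadata offsetAndMetadata) {
        this.appendedBatchOffset = appendedBatchOffset;
        this.offsetAndMetadata = offsetAndMetadata;
    }

    Optional<Long> appendedBatchOffset() {
        return appendedBatchOffset;
    }

    OffsetAndMetadata offsetAndMetadata() {
        return offsetAndMetadata;
    }

    // A commit is older than another if it was appended earlier in the log;
    // the loader uses this to keep only the most recent materialized commit.
    boolean olderThan(CommitRecordMetadataAndOffsetSketch that) {
        return appendedBatchOffset.orElse(-1L) < that.appendedBatchOffset.orElse(-1L);
    }
}

Whatever the exact shape, the pairing is what lets the test assert that each materialized offset came from the expected produce batch (firstProduceRecordOffset vs. secondProduceRecordOffset).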
Use of io.streamnative.pulsar.handlers.kop.coordinator.group.GroupMetadata.CommitRecordMetadataAndOffset in project starlight-for-kafka by DataStax.
From class GroupMetadataManager, method unsafeLoadNextMetadataMessage:
private void unsafeLoadNextMetadataMessage(CompletableFuture<Reader<ByteBuffer>> metadataConsumer,
                                           MessageId endMessageId,
                                           CompletableFuture<Void> resultFuture,
                                           Consumer<GroupMetadata> onGroupLoaded,
                                           Map<GroupTopicPartition, CommitRecordMetadataAndOffset> loadedOffsets,
                                           Map<Long, Map<GroupTopicPartition, CommitRecordMetadataAndOffset>> pendingOffsets,
                                           Map<String, GroupMetadata> loadedGroups,
                                           Set<String> removedGroups) {
    if (shuttingDown.get()) {
        resultFuture.completeExceptionally(new Exception("Group metadata manager is shutting down"));
        return;
    }

    if (log.isTraceEnabled()) {
        log.trace("Reading the next metadata message from topic {}", metadataConsumer.join().getTopic());
    }

    BiConsumer<Message<ByteBuffer>, Throwable> readNextComplete = (message, cause) -> {
        // Check the failure first: on a read error the message may be null,
        // so it must not be dereferenced (e.g. for trace logging) before this guard.
        if (null != cause) {
            resultFuture.completeExceptionally(cause);
            return;
        }

        if (log.isTraceEnabled()) {
            log.trace("Metadata consumer received a metadata message from {} @ {}",
                metadataConsumer.join().getTopic(), message.getMessageId());
        }

        if (message.getMessageId().compareTo(endMessageId) >= 0) {
            // reached the end of the partition
            processLoadedAndRemovedGroups(resultFuture, onGroupLoaded,
                loadedOffsets, pendingOffsets, loadedGroups, removedGroups);
            return;
        }

        if (!message.hasKey()) {
            // messages without a key are placeholders
            loadNextMetadataMessage(metadataConsumer, endMessageId, resultFuture, onGroupLoaded,
                loadedOffsets, pendingOffsets, loadedGroups, removedGroups);
            return;
        }

        ByteBuffer buffer = message.getValue();
        MemoryRecords memRecords = MemoryRecords.readableRecords(buffer);
        memRecords.batches().forEach(batch -> {
            boolean isTxnOffsetCommit = batch.isTransactional();
            if (batch.isControlBatch()) {
                Iterator<Record> recordIterator = batch.iterator();
                if (recordIterator.hasNext()) {
                    Record record = recordIterator.next();
                    ControlRecordType controlRecord = ControlRecordType.parse(record.key());
                    if (controlRecord == ControlRecordType.COMMIT) {
                        // the transaction committed: promote its pending offsets,
                        // keeping only commits newer than what is already loaded
                        pendingOffsets.getOrDefault(batch.producerId(), Collections.emptyMap())
                            .forEach((groupTopicPartition, commitRecordMetadataAndOffset) -> {
                                if (!loadedOffsets.containsKey(groupTopicPartition)
                                    || loadedOffsets.get(groupTopicPartition)
                                        .olderThan(commitRecordMetadataAndOffset)) {
                                    loadedOffsets.put(groupTopicPartition, commitRecordMetadataAndOffset);
                                }
                            });
                    }
                    // committed or aborted, the producer's pending state is consumed either way
                    pendingOffsets.remove(batch.producerId());
                }
            } else {
                Optional<Long> batchBaseOffset = Optional.empty();
                for (Record record : batch) {
                    checkArgument(record.hasKey(), "Group metadata/offset entry key should not be null");
                    if (!batchBaseOffset.isPresent()) {
                        batchBaseOffset = Optional.of(record.offset());
                    }
                    BaseKey bk = readMessageKey(record.key());
                    if (log.isTraceEnabled()) {
                        log.trace("Applying metadata record {} received from {}",
                            bk, metadataConsumer.join().getTopic());
                    }
                    if (bk instanceof OffsetKey) {
                        OffsetKey offsetKey = (OffsetKey) bk;
                        if (isTxnOffsetCommit && !pendingOffsets.containsKey(batch.producerId())) {
                            pendingOffsets.put(batch.producerId(), new HashMap<>());
                        }
                        // load the offset
                        GroupTopicPartition groupTopicPartition = offsetKey.key();
                        if (!record.hasValue()) {
                            // a tombstone removes the offset
                            if (isTxnOffsetCommit) {
                                pendingOffsets.get(batch.producerId()).remove(groupTopicPartition);
                            } else {
                                loadedOffsets.remove(groupTopicPartition);
                            }
                        } else {
                            OffsetAndMetadata offsetAndMetadata = readOffsetMessageValue(record.value());
                            CommitRecordMetadataAndOffset commitRecordMetadataAndOffset =
                                new CommitRecordMetadataAndOffset(batchBaseOffset, offsetAndMetadata);
                            if (isTxnOffsetCommit) {
                                pendingOffsets.get(batch.producerId())
                                    .put(groupTopicPartition, commitRecordMetadataAndOffset);
                            } else {
                                loadedOffsets.put(groupTopicPartition, commitRecordMetadataAndOffset);
                            }
                        }
                    } else if (bk instanceof GroupMetadataKey) {
                        GroupMetadataKey groupMetadataKey = (GroupMetadataKey) bk;
                        String gid = groupMetadataKey.key();
                        GroupMetadata gm = readGroupMessageValue(gid, record.value());
                        if (gm != null) {
                            removedGroups.remove(gid);
                            loadedGroups.put(gid, gm);
                        } else {
                            loadedGroups.remove(gid);
                            removedGroups.add(gid);
                        }
                    } else {
                        resultFuture.completeExceptionally(new IllegalStateException(
                            "Unexpected message key " + bk + " while loading offsets and group metadata"));
                        return;
                    }
                }
            }
        });
        loadNextMetadataMessage(metadataConsumer, endMessageId, resultFuture, onGroupLoaded,
            loadedOffsets, pendingOffsets, loadedGroups, removedGroups);
    };

    metadataConsumer.thenComposeAsync(r -> r.readNextAsync()).whenCompleteAsync((message, cause) -> {
        try {
            readNextComplete.accept(message, cause);
        } catch (Throwable completeCause) {
            log.error("Unknown exception caught when processing the received metadata message from topic {}",
                metadataConsumer.join().getTopic(), completeCause);
            resultFuture.completeExceptionally(completeCause);
        }
    }, scheduler);
}
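The control-batch branch above is where transactional loading pays off: offsets staged under a producer id are promoted into the loaded set on a COMMIT marker and silently dropped otherwise, and promotion respects olderThan so a replayed older commit never overwrites a newer one. The following self-contained simulation illustrates just that promotion rule; the PendingPromotion class, Commit record, and string partition keys are hypothetical simplifications, not project types.

import java.util.HashMap;
import java.util.Map;

// Minimal simulation of the COMMIT-marker promotion rule in the loader above.
public class PendingPromotion {

    // Simplified stand-in: the batch offset where the commit was appended, plus the offset value.
    record Commit(long appendedBatchOffset, long offset) {
        boolean olderThan(Commit that) {
            return appendedBatchOffset < that.appendedBatchOffset;
        }
    }

    public static void main(String[] args) {
        Map<String, Commit> loaded = new HashMap<>();
        Map<Long, Map<String, Commit>> pending = new HashMap<>();

        // A transactional producer (id 1000) stages a commit for foo-0.
        pending.computeIfAbsent(1000L, id -> new HashMap<>())
               .put("foo-0", new Commit(7L, 23L));

        // COMMIT control record: promote each pending entry unless the loaded
        // entry is already newer, then clear the producer's pending state.
        pending.getOrDefault(1000L, Map.of()).forEach((tp, commit) -> {
            if (!loaded.containsKey(tp) || loaded.get(tp).olderThan(commit)) {
                loaded.put(tp, commit);
            }
        });
        pending.remove(1000L);

        System.out.println(loaded); // {foo-0=Commit[appendedBatchOffset=7, offset=23]}

        // An ABORT control record would instead just call pending.remove(producerId),
        // discarding the staged offsets without touching the loaded map.
    }
}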
Use of io.streamnative.pulsar.handlers.kop.coordinator.group.GroupMetadata.CommitRecordMetadataAndOffset in project starlight-for-kafka by DataStax.
From class GroupMetadataTest, method testOffsetCommitWithAnotherPending:
@Test
public void testOffsetCommitWithAnotherPending() {
    TopicPartition partition = new TopicPartition("foo", 0);
    OffsetAndMetadata firstOffset = OffsetAndMetadata.apply(37);
    OffsetAndMetadata secondOffset = OffsetAndMetadata.apply(57);

    Map<TopicPartition, OffsetAndMetadata> offsets = new HashMap<>();
    offsets.put(partition, firstOffset);
    group.prepareOffsetCommit(offsets);
    assertTrue(group.hasOffsets());
    assertEquals(Optional.empty(), group.offset(partition, NAMESPACE_PREFIX));

    offsets = new HashMap<>();
    offsets.put(partition, secondOffset);
    group.prepareOffsetCommit(offsets);
    assertTrue(group.hasOffsets());

    group.onOffsetCommitAppend(partition, new CommitRecordMetadataAndOffset(Optional.of(4L), firstOffset));
    assertTrue(group.hasOffsets());
    assertEquals(Optional.of(firstOffset), group.offset(partition, NAMESPACE_PREFIX));

    group.onOffsetCommitAppend(partition, new CommitRecordMetadataAndOffset(Optional.of(5L), secondOffset));
    assertTrue(group.hasOffsets());
    assertEquals(Optional.of(secondOffset), group.offset(partition, NAMESPACE_PREFIX));
}
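Why does firstOffset become visible while secondOffset is still pending, and then get replaced once secondOffset's append completes? A hedged sketch of bookkeeping that produces this behavior, modeled on the analogous logic in Apache Kafka's GroupMetadata (all names, types, and structure here are assumptions, not the starlight-for-kafka source):

import java.util.HashMap;
import java.util.Map;
import java.util.Optional;

// Hedged sketch of the pending/materialized offset bookkeeping the test exercises.
final class OffsetCommitBookkeepingSketch {

    record OffsetAndMetadata(long offset) { }
    record Commit(Optional<Long> appendedBatchOffset, OffsetAndMetadata offsetAndMetadata) {
        boolean olderThan(Commit that) {
            return appendedBatchOffset.orElse(-1L) < that.appendedBatchOffset.orElse(-1L);
        }
    }

    private final Map<String, OffsetAndMetadata> pendingOffsetCommits = new HashMap<>();
    private final Map<String, Commit> offsets = new HashMap<>();

    // prepareOffsetCommit: stage the write; nothing is visible to fetchers yet.
    void prepareOffsetCommit(String partition, OffsetAndMetadata offset) {
        pendingOffsetCommits.put(partition, offset);
    }

    // onOffsetCommitAppend: the write landed in the log, so materialize it
    // (unless something newer is already materialized), and clear the pending
    // entry only if it still refers to this exact offset.
    void onOffsetCommitAppend(String partition, Commit commit) {
        if (pendingOffsetCommits.containsKey(partition)) {
            if (!offsets.containsKey(partition) || offsets.get(partition).olderThan(commit)) {
                offsets.put(partition, commit);
            }
        }
        if (commit.offsetAndMetadata().equals(pendingOffsetCommits.get(partition))) {
            pendingOffsetCommits.remove(partition);
        }
    }

    Optional<OffsetAndMetadata> offset(String partition) {
        return Optional.ofNullable(offsets.get(partition)).map(Commit::offsetAndMetadata);
    }
}

The key detail is the two independent checks: materialization only requires that some write is pending for the partition, while the pending entry is cleared only when the appended value is the one currently staged, so the still-pending secondOffset survives firstOffset's append.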
Use of io.streamnative.pulsar.handlers.kop.coordinator.group.GroupMetadata.CommitRecordMetadataAndOffset in project starlight-for-kafka by DataStax.
From class GroupMetadataTest, method testTransactionalCommitIsAbortedAndConsumerCommitWins:
@Test
public void testTransactionalCommitIsAbortedAndConsumerCommitWins() {
    TopicPartition partition = new TopicPartition("foo", 0);
    long producerId = 13232L;
    OffsetAndMetadata txnOffsetCommit = OffsetAndMetadata.apply(37);
    OffsetAndMetadata consumerOffsetCommit = OffsetAndMetadata.apply(57);

    Map<TopicPartition, OffsetAndMetadata> offsets = new HashMap<>();
    offsets.put(partition, txnOffsetCommit);
    group.prepareTxnOffsetCommit(producerId, offsets);
    assertTrue(group.hasOffsets());
    assertEquals(Optional.empty(), group.offset(partition, NAMESPACE_PREFIX));

    offsets = new HashMap<>();
    offsets.put(partition, consumerOffsetCommit);
    group.prepareOffsetCommit(offsets);
    assertTrue(group.hasOffsets());

    group.onOffsetCommitAppend(partition, new CommitRecordMetadataAndOffset(Optional.of(3L), consumerOffsetCommit));
    group.onTxnOffsetCommitAppend(producerId, partition, new CommitRecordMetadataAndOffset(Optional.of(4L), txnOffsetCommit));
    assertTrue(group.hasOffsets());
    // The transactional offset commit hasn't been committed yet, so we should materialize the consumer
    // offset commit.
    assertEquals(Optional.of(consumerOffsetCommit), group.offset(partition, NAMESPACE_PREFIX));

    group.completePendingTxnOffsetCommit(producerId, false);
    assertTrue(group.hasOffsets());
    // The transactional offset commit should be discarded and the consumer offset commit should continue to be
    // materialized.
    assertFalse(group.hasPendingOffsetCommitsFromProducer(producerId));
    assertEquals(Optional.of(consumerOffsetCommit), group.offset(partition, NAMESPACE_PREFIX));
}
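The abort path of completePendingTxnOffsetCommit is what keeps the consumer commit materialized here: the producer's pending transactional offsets are consumed and, absent a commit, discarded without ever touching the materialized map. A hedged, simplified sketch of that completion step, modeled on the analogous Apache Kafka logic (types and names are assumed stand-ins):

import java.util.HashMap;
import java.util.Map;

// Hedged sketch of completePendingTxnOffsetCommit(producerId, isCommit).
final class TxnOffsetCompletionSketch {

    record Commit(long appendedBatchOffset, long offset) {
        boolean olderThan(Commit that) {
            return appendedBatchOffset < that.appendedBatchOffset;
        }
    }

    private final Map<String, Commit> offsets = new HashMap<>();
    private final Map<Long, Map<String, Commit>> pendingTransactionalOffsetCommits = new HashMap<>();

    void completePendingTxnOffsetCommit(long producerId, boolean isCommit) {
        // Either way, the producer's pending state is consumed here.
        Map<String, Commit> pending = pendingTransactionalOffsetCommits.remove(producerId);
        if (pending == null || !isCommit) {
            return; // abort: pending transactional commits are discarded outright
        }
        // commit: materialize each pending entry unless something newer already won
        pending.forEach((partition, commit) -> {
            if (!offsets.containsKey(partition) || offsets.get(partition).olderThan(commit)) {
                offsets.put(partition, commit);
            }
        });
    }
}

Passing false, as the test does, takes the early return: the pending map is removed, hasPendingOffsetCommitsFromProducer turns false, and the previously materialized consumer commit stands.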
Use of io.streamnative.pulsar.handlers.kop.coordinator.group.GroupMetadata.CommitRecordMetadataAndOffset in project starlight-for-kafka by DataStax.
From class GroupMetadataTest, method testOffsetCommitFailureWithAnotherPending:
@Test
public void testOffsetCommitFailureWithAnotherPending() {
    TopicPartition partition = new TopicPartition("foo", 0);
    OffsetAndMetadata firstOffset = OffsetAndMetadata.apply(37);
    OffsetAndMetadata secondOffset = OffsetAndMetadata.apply(57);

    Map<TopicPartition, OffsetAndMetadata> offsets = new HashMap<>();
    offsets.put(partition, firstOffset);
    group.prepareOffsetCommit(offsets);
    assertTrue(group.hasOffsets());
    assertEquals(Optional.empty(), group.offset(partition, NAMESPACE_PREFIX));

    offsets = new HashMap<>();
    offsets.put(partition, secondOffset);
    group.prepareOffsetCommit(offsets);
    assertTrue(group.hasOffsets());

    // The write of the first offset fails while the second is still pending.
    group.failPendingOffsetWrite(partition, firstOffset);
    assertTrue(group.hasOffsets());
    assertEquals(Optional.empty(), group.offset(partition, NAMESPACE_PREFIX));

    group.onOffsetCommitAppend(partition, new CommitRecordMetadataAndOffset(Optional.of(3L), secondOffset));
    assertTrue(group.hasOffsets());
    assertEquals(Optional.of(secondOffset), group.offset(partition, NAMESPACE_PREFIX));
}
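The subtlety this test pins down is that a failed log append must discard only the exact offset that failed: a newer staged commit for the same partition survives. A hedged one-method sketch of that rule (hypothetical names; the two-argument Map.remove(key, value) gives exactly the required compare-and-remove semantics):

import java.util.HashMap;
import java.util.Map;

// Hedged sketch of failPendingOffsetWrite, mirroring the behavior the test exercises.
final class FailedOffsetWriteSketch {

    record OffsetAndMetadata(long offset) { }

    private final Map<String, OffsetAndMetadata> pendingOffsetCommits = new HashMap<>();

    // Drop the pending entry only if it is still the offset whose append failed;
    // a later prepareOffsetCommit for the same partition (secondOffset in the
    // test) must survive the failure of the earlier write.
    void failPendingOffsetWrite(String partition, OffsetAndMetadata failedOffset) {
        pendingOffsetCommits.remove(partition, failedOffset);
    }
}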