Use of org.apache.kafka.common.record.MemoryRecords in project kafka by apache.
Class FetcherTest, method testReadCommittedWithCommittedAndAbortedTransactions.
@Test
public void testReadCommittedWithCommittedAndAbortedTransactions() {
    buildFetcher(OffsetResetStrategy.EARLIEST, new ByteArrayDeserializer(), new ByteArrayDeserializer(),
        Integer.MAX_VALUE, IsolationLevel.READ_COMMITTED);
    ByteBuffer buffer = ByteBuffer.allocate(1024);
    List<FetchResponseData.AbortedTransaction> abortedTransactions = new ArrayList<>();
    long pid1 = 1L;
    long pid2 = 2L;
    // Appends for producer 1 (eventually committed)
    appendTransactionalRecords(buffer, pid1, 0L,
        new SimpleRecord("commit1-1".getBytes(), "value".getBytes()),
        new SimpleRecord("commit1-2".getBytes(), "value".getBytes()));
    // Appends for producer 2 (eventually aborted)
    appendTransactionalRecords(buffer, pid2, 2L, new SimpleRecord("abort2-1".getBytes(), "value".getBytes()));
    // Commit producer 1
    commitTransaction(buffer, pid1, 3L);
    // Append more for producer 2 (eventually aborted)
    appendTransactionalRecords(buffer, pid2, 4L, new SimpleRecord("abort2-2".getBytes(), "value".getBytes()));
    // Abort producer 2
    abortTransaction(buffer, pid2, 5L);
    abortedTransactions.add(new FetchResponseData.AbortedTransaction().setProducerId(pid2).setFirstOffset(2L));
    // New transaction for producer 1 (eventually aborted)
    appendTransactionalRecords(buffer, pid1, 6L, new SimpleRecord("abort1-1".getBytes(), "value".getBytes()));
    // New transaction for producer 2 (eventually committed)
    appendTransactionalRecords(buffer, pid2, 7L, new SimpleRecord("commit2-1".getBytes(), "value".getBytes()));
    // Add more records for producer 1 (eventually aborted)
    appendTransactionalRecords(buffer, pid1, 8L, new SimpleRecord("abort1-2".getBytes(), "value".getBytes()));
    // Abort producer 1
    abortTransaction(buffer, pid1, 9L);
    abortedTransactions.add(new FetchResponseData.AbortedTransaction().setProducerId(pid1).setFirstOffset(6L));
    // Commit producer 2
    commitTransaction(buffer, pid2, 10L);
    buffer.flip();
    MemoryRecords records = MemoryRecords.readableRecords(buffer);
    assignFromUser(singleton(tp0));
    subscriptions.seek(tp0, 0);
    // Normal fetch
    assertEquals(1, fetcher.sendFetches());
    assertFalse(fetcher.hasCompletedFetches());
    client.prepareResponse(fullFetchResponseWithAbortedTransactions(records, abortedTransactions, Errors.NONE, 100L, 100L, 0));
    consumerClient.poll(time.timer(0));
    assertTrue(fetcher.hasCompletedFetches());
    Map<TopicPartition, List<ConsumerRecord<byte[], byte[]>>> fetchedRecords = fetchedRecords();
    assertTrue(fetchedRecords.containsKey(tp0));
    // There are only 3 committed records
    List<ConsumerRecord<byte[], byte[]>> fetchedConsumerRecords = fetchedRecords.get(tp0);
    Set<String> fetchedKeys = new HashSet<>();
    for (ConsumerRecord<byte[], byte[]> consumerRecord : fetchedConsumerRecords) {
        fetchedKeys.add(new String(consumerRecord.key(), StandardCharsets.UTF_8));
    }
    assertEquals(mkSet("commit1-1", "commit1-2", "commit2-1"), fetchedKeys);
}
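The appendTransactionalRecords, commitTransaction, and abortTransaction helpers used above are private to FetcherTest. Outside that test, a comparable buffer can be assembled from MemoryRecords' public factory methods. Below is a minimal sketch of one committed transaction plus its COMMIT marker; the class name and constants are illustrative, and this is not the test's actual helper code.

import java.nio.ByteBuffer;

import org.apache.kafka.common.record.CompressionType;
import org.apache.kafka.common.record.ControlRecordType;
import org.apache.kafka.common.record.EndTransactionMarker;
import org.apache.kafka.common.record.MemoryRecords;
import org.apache.kafka.common.record.MutableRecordBatch;
import org.apache.kafka.common.record.SimpleRecord;

public class TransactionalBufferSketch {
    public static void main(String[] args) {
        long producerId = 1L;
        short producerEpoch = 0;

        // Transactional data batch at offsets 0..1 for producer 1.
        MemoryRecords data = MemoryRecords.withTransactionalRecords(
            0L, CompressionType.NONE, producerId, producerEpoch,
            0 /* baseSequence */, 0 /* partitionLeaderEpoch */,
            new SimpleRecord("commit1-1".getBytes(), "value".getBytes()),
            new SimpleRecord("commit1-2".getBytes(), "value".getBytes()));

        // COMMIT control marker at offset 2 closes the transaction.
        MemoryRecords marker = MemoryRecords.withEndTransactionMarker(
            2L, System.currentTimeMillis(), 0 /* partitionLeaderEpoch */,
            producerId, producerEpoch,
            new EndTransactionMarker(ControlRecordType.COMMIT, 0 /* coordinatorEpoch */));

        // Splice both batches into one buffer, as the test helpers do, then re-read it.
        ByteBuffer buffer = ByteBuffer.allocate(data.sizeInBytes() + marker.sizeInBytes());
        buffer.put(data.buffer());
        buffer.put(marker.buffer());
        buffer.flip();
        for (MutableRecordBatch batch : MemoryRecords.readableRecords(buffer).batches())
            System.out.println("offset " + batch.baseOffset() + " control=" + batch.isControlBatch());
    }
}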
Use of org.apache.kafka.common.record.MemoryRecords in project kafka by apache.
Class FetcherTest, method testConsumerPositionUpdatedWhenSkippingAbortedTransactions.
@Test
public void testConsumerPositionUpdatedWhenSkippingAbortedTransactions() {
    buildFetcher(OffsetResetStrategy.EARLIEST, new ByteArrayDeserializer(), new ByteArrayDeserializer(),
        Integer.MAX_VALUE, IsolationLevel.READ_COMMITTED);
    ByteBuffer buffer = ByteBuffer.allocate(1024);
    long currentOffset = 0;
    currentOffset += appendTransactionalRecords(buffer, 1L, currentOffset,
        new SimpleRecord(time.milliseconds(), "abort1-1".getBytes(), "value".getBytes()),
        new SimpleRecord(time.milliseconds(), "abort1-2".getBytes(), "value".getBytes()));
    currentOffset += abortTransaction(buffer, 1L, currentOffset);
    buffer.flip();
    List<FetchResponseData.AbortedTransaction> abortedTransactions = Collections.singletonList(
        new FetchResponseData.AbortedTransaction().setProducerId(1).setFirstOffset(0));
    MemoryRecords records = MemoryRecords.readableRecords(buffer);
    assignFromUser(singleton(tp0));
    subscriptions.seek(tp0, 0);
    // Normal fetch
    assertEquals(1, fetcher.sendFetches());
    assertFalse(fetcher.hasCompletedFetches());
    client.prepareResponse(fullFetchResponseWithAbortedTransactions(records, abortedTransactions, Errors.NONE, 100L, 100L, 0));
    consumerClient.poll(time.timer(0));
    assertTrue(fetcher.hasCompletedFetches());
    Map<TopicPartition, List<ConsumerRecord<byte[], byte[]>>> fetchedRecords = fetchedRecords();
    // Ensure that we don't return any of the aborted records, but still advance the consumer position.
    assertFalse(fetchedRecords.containsKey(tp0));
    assertEquals(currentOffset, subscriptions.position(tp0).offset);
}
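For intuition about what the two tests above verify, here is a simplified, stand-alone sketch of READ_COMMITTED skipping: a producer's data is dropped from the aborted transaction's firstOffset until that producer's ABORT control marker. This is illustrative only, not the consumer Fetcher's actual implementation.

import java.util.HashSet;
import java.util.List;
import java.util.Set;

import org.apache.kafka.common.message.FetchResponseData;
import org.apache.kafka.common.record.ControlRecordType;
import org.apache.kafka.common.record.MemoryRecords;
import org.apache.kafka.common.record.MutableRecordBatch;
import org.apache.kafka.common.record.Record;
import org.apache.kafka.common.utils.Utils;

public class ReadCommittedSketch {
    // Prints the keys of records belonging to committed transactions only.
    public static void printCommittedKeys(MemoryRecords records,
                                          List<FetchResponseData.AbortedTransaction> aborted) {
        Set<Long> abortedProducers = new HashSet<>();
        for (MutableRecordBatch batch : records.batches()) {
            // A transaction that will abort becomes "active" at its first offset.
            for (FetchResponseData.AbortedTransaction tx : aborted) {
                if (tx.firstOffset() == batch.baseOffset())
                    abortedProducers.add(tx.producerId());
            }
            if (batch.isControlBatch()) {
                for (Record record : batch) {
                    // The ABORT marker ends the aborted range for this producer.
                    if (ControlRecordType.parse(record.key()) == ControlRecordType.ABORT)
                        abortedProducers.remove(batch.producerId());
                }
            } else if (!abortedProducers.contains(batch.producerId())) {
                for (Record record : batch)
                    System.out.println(Utils.utf8(record.key()));
            }
        }
    }
}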
Use of org.apache.kafka.common.record.MemoryRecords in project kafka by apache.
Class UncompressedRecordBatchValidationBenchmark, method measureAssignOffsetsNonCompressed.
@Benchmark
public void measureAssignOffsetsNonCompressed(Blackhole bh) {
    // duplicate() re-reads the same pre-built batch on every invocation without
    // disturbing the shared buffer's position.
    MemoryRecords records = MemoryRecords.readableRecords(singleBatchBuffer.duplicate());
    LogValidator.assignOffsetsNonCompressed(records, new TopicPartition("a", 0), new LongRef(startingOffset),
        System.currentTimeMillis(), false, TimestampType.CREATE_TIME, Long.MAX_VALUE, 0,
        new AppendOrigin.Client$(), messageVersion, brokerTopicStats);
}
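singleBatchBuffer, startingOffset, messageVersion, and brokerTopicStats come from the benchmark's setup, which this excerpt omits. As an assumption about what such setup might look like, a buffer holding one uncompressed batch can be produced with the public MemoryRecordsBuilder API:

import java.nio.ByteBuffer;

import org.apache.kafka.common.record.CompressionType;
import org.apache.kafka.common.record.MemoryRecords;
import org.apache.kafka.common.record.MemoryRecordsBuilder;
import org.apache.kafka.common.record.RecordBatch;
import org.apache.kafka.common.record.TimestampType;

public class SingleBatchBufferSketch {
    public static void main(String[] args) {
        ByteBuffer buffer = ByteBuffer.allocate(1 << 20);
        MemoryRecordsBuilder builder = MemoryRecords.builder(buffer,
            RecordBatch.CURRENT_MAGIC_VALUE, CompressionType.NONE,
            TimestampType.CREATE_TIME, 0L /* baseOffset */);
        for (int i = 0; i < 100; i++)
            builder.append(System.currentTimeMillis(), null, ("value-" + i).getBytes());
        // build() closes the builder; the returned records are backed by `buffer`.
        ByteBuffer singleBatchBuffer = builder.build().buffer();
        // Each benchmark invocation would then re-read singleBatchBuffer.duplicate().
        System.out.println(MemoryRecords.readableRecords(singleBatchBuffer.duplicate()).sizeInBytes() + " bytes");
    }
}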
Use of org.apache.kafka.common.record.MemoryRecords in project kafka by apache.
Class KafkaRaftClientTest, method testLeaderAppendSingleMemberQuorum.
@Test
public void testLeaderAppendSingleMemberQuorum() throws Exception {
    int localId = 0;
    Set<Integer> voters = Collections.singleton(localId);
    RaftClientTestContext context = new RaftClientTestContext.Builder(localId, voters).build();
    long now = context.time.milliseconds();
    context.pollUntil(() -> context.log.endOffset().offset == 1L);
    context.assertElectedLeader(1, localId);
    // We still write the leader change message
    assertEquals(OptionalLong.of(1L), context.client.highWatermark());
    String[] appendRecords = new String[] { "a", "b", "c" };
    // First poll has no high watermark advance
    context.client.poll();
    assertEquals(OptionalLong.of(1L), context.client.highWatermark());
    context.client.scheduleAppend(context.currentEpoch(), Arrays.asList(appendRecords));
    // Then poll the appended data with leader change record
    context.client.poll();
    assertEquals(OptionalLong.of(4L), context.client.highWatermark());
    // Now try reading it
    int otherNodeId = 1;
    List<MutableRecordBatch> batches = new ArrayList<>(2);
    boolean appended = true;
    // Continue to fetch until the leader returns an empty response
    while (appended) {
        long fetchOffset = 0;
        int lastFetchedEpoch = 0;
        if (!batches.isEmpty()) {
            MutableRecordBatch lastBatch = batches.get(batches.size() - 1);
            fetchOffset = lastBatch.lastOffset() + 1;
            lastFetchedEpoch = lastBatch.partitionLeaderEpoch();
        }
        context.deliverRequest(context.fetchRequest(1, otherNodeId, fetchOffset, lastFetchedEpoch, 0));
        context.pollUntilResponse();
        MemoryRecords fetchedRecords = context.assertSentFetchPartitionResponse(Errors.NONE, 1, OptionalInt.of(localId));
        List<MutableRecordBatch> fetchedBatch = Utils.toList(fetchedRecords.batchIterator());
        batches.addAll(fetchedBatch);
        appended = !fetchedBatch.isEmpty();
    }
    assertEquals(2, batches.size());
    MutableRecordBatch leaderChangeBatch = batches.get(0);
    assertTrue(leaderChangeBatch.isControlBatch());
    List<Record> readRecords = Utils.toList(leaderChangeBatch.iterator());
    assertEquals(1, readRecords.size());
    Record record = readRecords.get(0);
    assertEquals(now, record.timestamp());
    RaftClientTestContext.verifyLeaderChangeMessage(localId, Collections.singletonList(localId),
        Collections.singletonList(localId), record.key(), record.value());
    MutableRecordBatch batch = batches.get(1);
    assertEquals(1, batch.partitionLeaderEpoch());
    readRecords = Utils.toList(batch.iterator());
    assertEquals(3, readRecords.size());
    for (int i = 0; i < appendRecords.length; i++) {
        assertEquals(appendRecords[i], Utils.utf8(readRecords.get(i).value()));
    }
}
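The first batch verified above is a control batch carrying a LeaderChangeMessage. Here is a compact sketch of separating control metadata from application data in a fetched MemoryRecords; it assumes ControlRecordUtils.deserializeLeaderChangeMessage (the helper the raft internals use for this), so treat the exact call as an assumption:

import org.apache.kafka.common.message.LeaderChangeMessage;
import org.apache.kafka.common.record.ControlRecordType;
import org.apache.kafka.common.record.ControlRecordUtils;
import org.apache.kafka.common.record.MemoryRecords;
import org.apache.kafka.common.record.MutableRecordBatch;
import org.apache.kafka.common.record.Record;
import org.apache.kafka.common.utils.Utils;

public class FetchedBatchesSketch {
    // Separates control metadata from application data in a fetched record set.
    public static void dump(MemoryRecords fetched) {
        for (MutableRecordBatch batch : fetched.batches()) {
            if (batch.isControlBatch()) {
                for (Record record : batch) {
                    if (ControlRecordType.parse(record.key()) == ControlRecordType.LEADER_CHANGE) {
                        LeaderChangeMessage message = ControlRecordUtils.deserializeLeaderChangeMessage(record);
                        System.out.println("leader change: " + message);
                    }
                }
            } else {
                for (Record record : batch)
                    System.out.println("data: " + Utils.utf8(record.value()));
            }
        }
    }
}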
Use of org.apache.kafka.common.record.MemoryRecords in project kafka by apache.
Class BatchBuilderTest, method testBuildBatch.
@ParameterizedTest
@EnumSource(CompressionType.class)
void testBuildBatch(CompressionType compressionType) {
    ByteBuffer buffer = ByteBuffer.allocate(1024);
    long baseOffset = 57;
    long logAppendTime = time.milliseconds();
    boolean isControlBatch = false;
    int leaderEpoch = 15;
    BatchBuilder<String> builder = new BatchBuilder<>(buffer, serde, compressionType, baseOffset,
        logAppendTime, isControlBatch, leaderEpoch, buffer.limit());
    List<String> records = Arrays.asList("a", "ap", "app", "appl", "apple");
    records.forEach(record -> builder.appendRecord(record, null));
    MemoryRecords builtRecordSet = builder.build();
    // Once built, the builder is closed: it reports that further records do not
    // fit and rejects new appends.
    assertTrue(builder.bytesNeeded(Arrays.asList("a"), null).isPresent());
    assertThrows(IllegalStateException.class, () -> builder.appendRecord("a", null));
    List<MutableRecordBatch> builtBatches = Utils.toList(builtRecordSet.batchIterator());
    assertEquals(1, builtBatches.size());
    assertEquals(records, builder.records());
    MutableRecordBatch batch = builtBatches.get(0);
    assertEquals(5, batch.countOrNull());
    assertEquals(compressionType, batch.compressionType());
    assertEquals(baseOffset, batch.baseOffset());
    assertEquals(logAppendTime, batch.maxTimestamp());
    assertEquals(isControlBatch, batch.isControlBatch());
    assertEquals(leaderEpoch, batch.partitionLeaderEpoch());
    List<String> builtRecords = Utils.toList(batch).stream()
        .map(record -> Utils.utf8(record.value()))
        .collect(Collectors.toList());
    assertEquals(records, builtRecords);
}
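The serde field above is supplied by the test harness and is not shown. Reading the bytesNeeded assertion as "a present OptionalInt means the next record does not fit", a hypothetical append loop built only from the BatchBuilder calls this test exercises could look like the sketch below; fillBatch and its semantics are assumptions, not BatchBuilder's documented contract.

import java.util.Collections;
import java.util.List;

import org.apache.kafka.common.record.MemoryRecords;
import org.apache.kafka.raft.internals.BatchBuilder;

public class FillBatchSketch {
    // Hypothetical helper: append values until bytesNeeded reports that the next
    // record no longer fits, then build the batch.
    public static MemoryRecords fillBatch(BatchBuilder<String> builder, List<String> values) {
        for (String value : values) {
            if (builder.bytesNeeded(Collections.singletonList(value), null).isPresent())
                break; // the current batch has no room for this record
            builder.appendRecord(value, null);
        }
        return builder.build();
    }
}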