Use of org.apache.kafka.common.record.MemoryRecordsBuilder in the Apache Kafka project.
From class FetcherTest, method appendTransactionalRecords.
private int appendTransactionalRecords(ByteBuffer buffer, long pid, long baseOffset, int baseSequence, SimpleRecord... records) {
    MemoryRecordsBuilder builder = MemoryRecords.builder(buffer, RecordBatch.CURRENT_MAGIC_VALUE, CompressionType.NONE,
        TimestampType.CREATE_TIME, baseOffset, time.milliseconds(), pid, (short) 0, baseSequence, true,
        RecordBatch.NO_PARTITION_LEADER_EPOCH);
    for (SimpleRecord record : records) {
        builder.append(record);
    }
    builder.build();
    return records.length;
}
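For context, here is a minimal standalone sketch of what this helper produces: a single transactional batch written with the same full MemoryRecords.builder overload. It assumes the usual org.apache.kafka.common.record imports; the producer id, epoch, and sequence values are illustrative only, not taken from FetcherTest.

// Sketch only: illustrative producer id/epoch/sequence values.
ByteBuffer buffer = ByteBuffer.allocate(1024);
MemoryRecordsBuilder builder = MemoryRecords.builder(buffer, RecordBatch.CURRENT_MAGIC_VALUE, CompressionType.NONE,
    TimestampType.CREATE_TIME, 0L /* baseOffset */, System.currentTimeMillis(), 1000L /* producerId */,
    (short) 0 /* producerEpoch */, 0 /* baseSequence */, true /* isTransactional */,
    RecordBatch.NO_PARTITION_LEADER_EPOCH);
builder.append(new SimpleRecord("key".getBytes(), "value".getBytes()));
MemoryRecords records = builder.build();
// The resulting batch carries the producer id, epoch, and base sequence and is flagged transactional.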
Use of org.apache.kafka.common.record.MemoryRecordsBuilder in the Apache Kafka project.
From class FetcherTest, method testTruncationDetected.
@Test
public void testTruncationDetected() {
    // Create some records that include a leader epoch (1)
    MemoryRecordsBuilder builder = MemoryRecords.builder(ByteBuffer.allocate(1024), RecordBatch.CURRENT_MAGIC_VALUE,
        CompressionType.NONE, TimestampType.CREATE_TIME, 0L, RecordBatch.NO_TIMESTAMP, RecordBatch.NO_PRODUCER_ID,
        RecordBatch.NO_PRODUCER_EPOCH, RecordBatch.NO_SEQUENCE, false,
        1); // record epoch is earlier than the leader epoch on the client
    builder.appendWithOffset(0L, 0L, "key".getBytes(), "value-1".getBytes());
    builder.appendWithOffset(1L, 0L, "key".getBytes(), "value-2".getBytes());
    builder.appendWithOffset(2L, 0L, "key".getBytes(), "value-3".getBytes());
    MemoryRecords records = builder.build();

    buildFetcher();
    assignFromUser(singleton(tp0));

    // Initialize the epoch=2
    Map<String, Integer> partitionCounts = new HashMap<>();
    partitionCounts.put(tp0.topic(), 4);
    MetadataResponse metadataResponse = RequestTestUtils.metadataUpdateWithIds("dummy", 1,
        Collections.emptyMap(), partitionCounts, tp -> 2, topicIds);
    metadata.updateWithCurrentRequestVersion(metadataResponse, false, 0L);

    // Offset validation requires OffsetForLeaderEpoch request v3 or higher
    Node node = metadata.fetch().nodes().get(0);
    apiVersions.update(node.idString(), NodeApiVersions.create());

    // Seek
    Metadata.LeaderAndEpoch leaderAndEpoch = new Metadata.LeaderAndEpoch(metadata.currentLeader(tp0).leader, Optional.of(1));
    subscriptions.seekValidated(tp0, new SubscriptionState.FetchPosition(0, Optional.of(1), leaderAndEpoch));

    // Check for truncation, this should cause tp0 to go into validation
    fetcher.validateOffsetsIfNeeded();

    // No fetches sent since we entered validation
    assertEquals(0, fetcher.sendFetches());
    assertFalse(fetcher.hasCompletedFetches());
    assertTrue(subscriptions.awaitingValidation(tp0));

    // Prepare OffsetForEpoch response then check that we update the subscription position correctly.
    client.prepareResponse(prepareOffsetsForLeaderEpochResponse(tp0, Errors.NONE, 1, 10L));
    consumerClient.pollNoWakeup();
    assertFalse(subscriptions.awaitingValidation(tp0));

    // Fetch again, now it works
    assertEquals(1, fetcher.sendFetches());
    assertFalse(fetcher.hasCompletedFetches());
    client.prepareResponse(fullFetchResponse(tidp0, records, Errors.NONE, 100L, 0));
    consumerClient.pollNoWakeup();
    assertTrue(fetcher.hasCompletedFetches());

    Map<TopicPartition, List<ConsumerRecord<byte[], byte[]>>> partitionRecords = fetchedRecords();
    assertTrue(partitionRecords.containsKey(tp0));
    assertEquals(subscriptions.position(tp0).offset, 3L);
    assertOptional(subscriptions.position(tp0).offsetEpoch, value -> assertEquals(value.intValue(), 1));
}
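The leader epoch that the builder stamps into the batch header is what the client later compares against the broker's OffsetsForLeaderEpoch response. A minimal sketch of writing and reading that epoch back, using only the builder overload shown above and the RecordBatch.partitionLeaderEpoch() accessor (values are illustrative):

// Sketch only: demonstrates that the epoch passed to the builder is readable on each batch.
MemoryRecordsBuilder epochBuilder = MemoryRecords.builder(ByteBuffer.allocate(1024), RecordBatch.CURRENT_MAGIC_VALUE,
    CompressionType.NONE, TimestampType.CREATE_TIME, 0L, RecordBatch.NO_TIMESTAMP, RecordBatch.NO_PRODUCER_ID,
    RecordBatch.NO_PRODUCER_EPOCH, RecordBatch.NO_SEQUENCE, false, 1 /* partition leader epoch */);
epochBuilder.appendWithOffset(0L, 0L, "key".getBytes(), "value".getBytes());
for (RecordBatch batch : epochBuilder.build().batches()) {
    System.out.println(batch.partitionLeaderEpoch()); // prints 1
}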
Use of org.apache.kafka.common.record.MemoryRecordsBuilder in the Apache Kafka project.
From class FetcherTest, method testInvalidDefaultRecordBatch.
@Test
public void testInvalidDefaultRecordBatch() {
    buildFetcher();

    ByteBuffer buffer = ByteBuffer.allocate(1024);
    ByteBufferOutputStream out = new ByteBufferOutputStream(buffer);
    MemoryRecordsBuilder builder = new MemoryRecordsBuilder(out, DefaultRecordBatch.CURRENT_MAGIC_VALUE,
        CompressionType.NONE, TimestampType.CREATE_TIME, 0L, 10L, 0L, (short) 0, 0, false, false, 0, 1024);
    builder.append(10L, "key".getBytes(), "value".getBytes());
    builder.close();
    buffer.flip();

    // Garble the CRC
    buffer.position(17);
    buffer.put("beef".getBytes());
    buffer.position(0);

    assignFromUser(singleton(tp0));
    subscriptions.seek(tp0, 0);

    // normal fetch
    assertEquals(1, fetcher.sendFetches());
    client.prepareResponse(fullFetchResponse(tidp0, MemoryRecords.readableRecords(buffer), Errors.NONE, 100L, 0));
    consumerClient.poll(time.timer(0));

    // the fetchedRecords() should always throw exception due to the bad batch.
    for (int i = 0; i < 2; i++) {
        try {
            fetcher.collectFetch();
            fail("fetchedRecords should have raised KafkaException");
        } catch (KafkaException e) {
            assertEquals(0, subscriptions.position(tp0).offset);
        }
    }
}
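Position 17 lands on the CRC field of the v2 (DefaultRecordBatch) header, so overwriting it invalidates the checksum. The same corruption can be observed directly on the batch, without going through the fetcher; a short sketch continuing from the buffer above, assuming the RecordBatch.isValid() accessor:

// Sketch only: checks the stored CRC directly on the corrupted buffer.
MemoryRecords corrupted = MemoryRecords.readableRecords(buffer);
for (RecordBatch batch : corrupted.batches()) {
    System.out.println(batch.isValid()); // expected to be false after the CRC bytes were overwritten
}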
Use of org.apache.kafka.common.record.MemoryRecordsBuilder in the Apache Kafka project.
From class RecordAccumulatorTest, method testSplitAndReenqueue.
@Test
public void testSplitAndReenqueue() throws ExecutionException, InterruptedException {
    long now = time.milliseconds();
    RecordAccumulator accum = createTestRecordAccumulator(1024, 10 * 1024, CompressionType.GZIP, 10);

    // Create a big batch
    ByteBuffer buffer = ByteBuffer.allocate(4096);
    MemoryRecordsBuilder builder = MemoryRecords.builder(buffer, CompressionType.NONE, TimestampType.CREATE_TIME, 0L);
    ProducerBatch batch = new ProducerBatch(tp1, builder, now, true);

    byte[] value = new byte[1024];
    final AtomicInteger acked = new AtomicInteger(0);
    Callback cb = new Callback() {
        @Override
        public void onCompletion(RecordMetadata metadata, Exception exception) {
            acked.incrementAndGet();
        }
    };

    // Append two messages so the batch is too big.
    Future<RecordMetadata> future1 = batch.tryAppend(now, null, value, Record.EMPTY_HEADERS, cb, now);
    Future<RecordMetadata> future2 = batch.tryAppend(now, null, value, Record.EMPTY_HEADERS, cb, now);
    assertNotNull(future1);
    assertNotNull(future2);
    batch.close();

    // Enqueue the batch to the accumulator as if the batch was created by the accumulator.
    accum.reenqueue(batch, now);
    time.sleep(101L);

    // Drain the batch.
    RecordAccumulator.ReadyCheckResult result = accum.ready(cluster, time.milliseconds());
    assertTrue(result.readyNodes.size() > 0, "The batch should be ready");
    Map<Integer, List<ProducerBatch>> drained = accum.drain(cluster, result.readyNodes, Integer.MAX_VALUE, time.milliseconds());
    assertEquals(1, drained.size(), "Only node1 should be drained");
    assertEquals(1, drained.get(node1.id()).size(), "Only one batch should be drained");

    // Split and reenqueue the batch.
    accum.splitAndReenqueue(drained.get(node1.id()).get(0));
    time.sleep(101L);

    drained = accum.drain(cluster, result.readyNodes, Integer.MAX_VALUE, time.milliseconds());
    assertFalse(drained.isEmpty());
    assertFalse(drained.get(node1.id()).isEmpty());
    drained.get(node1.id()).get(0).complete(acked.get(), 100L);
    assertEquals(1, acked.get(), "The first message should have been acked.");
    assertTrue(future1.isDone());
    assertEquals(0, future1.get().offset());

    drained = accum.drain(cluster, result.readyNodes, Integer.MAX_VALUE, time.milliseconds());
    assertFalse(drained.isEmpty());
    assertFalse(drained.get(node1.id()).isEmpty());
    drained.get(node1.id()).get(0).complete(acked.get(), 100L);
    assertEquals(2, acked.get(), "Both messages should have been acked.");
    assertTrue(future2.isDone());
    assertEquals(1, future2.get().offset());
}
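For orientation, here is a condensed sketch, outside the accumulator, of how such an oversized batch comes about: a MemoryRecordsBuilder wrapped in a ProducerBatch with two 1 KB values appended, which exceeds the 1024-byte batch size configured above and so becomes a candidate for splitAndReenqueue(). The topic name and sizes are illustrative only.

// Sketch only: builds the kind of oversized ProducerBatch that the accumulator would split.
long now = System.currentTimeMillis();
ByteBuffer buffer = ByteBuffer.allocate(4096);
MemoryRecordsBuilder builder = MemoryRecords.builder(buffer, CompressionType.NONE, TimestampType.CREATE_TIME, 0L);
ProducerBatch batch = new ProducerBatch(new TopicPartition("topic", 0), builder, now);
byte[] value = new byte[1024];
batch.tryAppend(now, null, value, Record.EMPTY_HEADERS, null, now);
batch.tryAppend(now, null, value, Record.EMPTY_HEADERS, null, now);
batch.close();
// With a 1024-byte batch size configured on the accumulator, this batch exceeds the limit,
// so splitAndReenqueue() breaks it apart via ProducerBatch.split(...), as the next example shows.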
Use of org.apache.kafka.common.record.MemoryRecordsBuilder in the Apache Kafka project.
From class ProducerBatchTest, method testSplitPreservesMagicAndCompressionType.
@Test
public void testSplitPreservesMagicAndCompressionType() {
    for (byte magic : Arrays.asList(MAGIC_VALUE_V0, MAGIC_VALUE_V1, MAGIC_VALUE_V2)) {
        for (CompressionType compressionType : CompressionType.values()) {
            if (compressionType == CompressionType.NONE && magic < MAGIC_VALUE_V2)
                continue;
            if (compressionType == CompressionType.ZSTD && magic < MAGIC_VALUE_V2)
                continue;

            MemoryRecordsBuilder builder = MemoryRecords.builder(ByteBuffer.allocate(1024), magic, compressionType,
                TimestampType.CREATE_TIME, 0L);
            ProducerBatch batch = new ProducerBatch(new TopicPartition("topic", 1), builder, now);

            while (true) {
                FutureRecordMetadata future = batch.tryAppend(now, "hi".getBytes(), "there".getBytes(),
                    Record.EMPTY_HEADERS, null, now);
                if (future == null)
                    break;
            }

            Deque<ProducerBatch> batches = batch.split(512);
            assertTrue(batches.size() >= 2);

            for (ProducerBatch splitProducerBatch : batches) {
                assertEquals(magic, splitProducerBatch.magic());
                assertTrue(splitProducerBatch.isSplitBatch());
                for (RecordBatch splitBatch : splitProducerBatch.records().batches()) {
                    assertEquals(magic, splitBatch.magic());
                    assertEquals(0L, splitBatch.baseOffset());
                    assertEquals(compressionType, splitBatch.compressionType());
                }
            }
        }
    }
}
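Taken together, the snippets above reduce to one core pattern: allocate a buffer, create a builder, append records, call build(), and read the records back. A minimal self-contained sketch of that pattern, using only the builder overload and accessors seen above (the class name is a placeholder):

import java.nio.ByteBuffer;
import java.nio.charset.StandardCharsets;
import org.apache.kafka.common.record.CompressionType;
import org.apache.kafka.common.record.MemoryRecords;
import org.apache.kafka.common.record.MemoryRecordsBuilder;
import org.apache.kafka.common.record.Record;
import org.apache.kafka.common.record.TimestampType;

public class MemoryRecordsBuilderSketch {
    public static void main(String[] args) {
        ByteBuffer buffer = ByteBuffer.allocate(1024);
        // Write a single uncompressed batch starting at offset 0.
        MemoryRecordsBuilder builder = MemoryRecords.builder(buffer, CompressionType.NONE,
            TimestampType.CREATE_TIME, 0L /* baseOffset */);
        builder.append(System.currentTimeMillis(), "key".getBytes(StandardCharsets.UTF_8),
            "value".getBytes(StandardCharsets.UTF_8));
        MemoryRecords records = builder.build();
        // Read the records back out of the in-memory buffer.
        for (Record record : records.records()) {
            System.out.println(record.offset() + " -> " + StandardCharsets.UTF_8.decode(record.value()));
        }
    }
}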