Use of org.apache.kafka.common.record.MemoryRecords in the Apache Kafka project.
From the class RequestResponseTest, method testProduceRequestPartitionSize.
@Test
public void testProduceRequestPartitionSize() {
    TopicPartition tp0 = new TopicPartition("test", 0);
    TopicPartition tp1 = new TopicPartition("test", 1);
    MemoryRecords records0 = MemoryRecords.withRecords(RecordBatch.MAGIC_VALUE_V2, CompressionType.NONE,
            new SimpleRecord("woot".getBytes()));
    MemoryRecords records1 = MemoryRecords.withRecords(RecordBatch.MAGIC_VALUE_V2, CompressionType.NONE,
            new SimpleRecord("woot".getBytes()), new SimpleRecord("woot".getBytes()));
    ProduceRequest request = ProduceRequest.forMagic(RecordBatch.MAGIC_VALUE_V2,
            new ProduceRequestData()
                    .setTopicData(new ProduceRequestData.TopicProduceDataCollection(asList(
                            new ProduceRequestData.TopicProduceData()
                                    .setName(tp0.topic())
                                    .setPartitionData(singletonList(new ProduceRequestData.PartitionProduceData()
                                            .setIndex(tp0.partition())
                                            .setRecords(records0))),
                            new ProduceRequestData.TopicProduceData()
                                    .setName(tp1.topic())
                                    .setPartitionData(singletonList(new ProduceRequestData.PartitionProduceData()
                                            .setIndex(tp1.partition())
                                            .setRecords(records1)))).iterator()))
                    .setAcks((short) 1)
                    .setTimeoutMs(5000)
                    .setTransactionalId("transactionalId"))
            .build((short) 3);
    assertEquals(2, request.partitionSizes().size());
    assertEquals(records0.sizeInBytes(), (int) request.partitionSizes().get(tp0));
    assertEquals(records1.sizeInBytes(), (int) request.partitionSizes().get(tp1));
}
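For orientation, here is a minimal, self-contained sketch, not taken from the Kafka test suite (the class name PartitionSizeSketch is ours), that builds the same two batches and prints their serialized sizes. sizeInBytes() measures the whole serialized buffer, which is the per-partition number that ProduceRequest#partitionSizes() reports, so the two assertions above compare like with like.

import org.apache.kafka.common.record.CompressionType;
import org.apache.kafka.common.record.MemoryRecords;
import org.apache.kafka.common.record.RecordBatch;
import org.apache.kafka.common.record.SimpleRecord;

public class PartitionSizeSketch {
    public static void main(String[] args) {
        // One-record and two-record batches, mirroring records0 and records1 above.
        MemoryRecords one = MemoryRecords.withRecords(RecordBatch.MAGIC_VALUE_V2,
                CompressionType.NONE, new SimpleRecord("woot".getBytes()));
        MemoryRecords two = MemoryRecords.withRecords(RecordBatch.MAGIC_VALUE_V2,
                CompressionType.NONE, new SimpleRecord("woot".getBytes()),
                new SimpleRecord("woot".getBytes()));
        // sizeInBytes() is the serialized size of the backing buffer; the request's
        // partitionSizes() map reports exactly these numbers per partition.
        System.out.println(one.sizeInBytes() + " vs " + two.sizeInBytes());
    }
}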
Use of org.apache.kafka.common.record.MemoryRecords in the Apache Kafka project.
From the class RequestResponseTest, method fetchResponseVersionTest.
@Test
public void fetchResponseVersionTest() {
    LinkedHashMap<TopicIdPartition, FetchResponseData.PartitionData> responseData = new LinkedHashMap<>();
    Uuid id = Uuid.randomUuid();
    Map<Uuid, String> topicNames = Collections.singletonMap(id, "test");
    TopicPartition tp = new TopicPartition("test", 0);
    MemoryRecords records = MemoryRecords.readableRecords(ByteBuffer.allocate(10));
    FetchResponseData.PartitionData partitionData = new FetchResponseData.PartitionData()
            .setPartitionIndex(0)
            .setHighWatermark(1000000)
            .setLogStartOffset(-1)
            .setRecords(records);
    // Use zero UUID since we are comparing with old request versions
    responseData.put(new TopicIdPartition(Uuid.ZERO_UUID, tp), partitionData);
    LinkedHashMap<TopicPartition, FetchResponseData.PartitionData> tpResponseData = new LinkedHashMap<>();
    tpResponseData.put(tp, partitionData);
    FetchResponse v0Response = FetchResponse.of(Errors.NONE, 0, INVALID_SESSION_ID, responseData);
    FetchResponse v1Response = FetchResponse.of(Errors.NONE, 10, INVALID_SESSION_ID, responseData);
    FetchResponse v0Deserialized = FetchResponse.parse(v0Response.serialize((short) 0), (short) 0);
    FetchResponse v1Deserialized = FetchResponse.parse(v1Response.serialize((short) 1), (short) 1);
    assertEquals(0, v0Deserialized.throttleTimeMs(), "Throttle time must be zero");
    assertEquals(10, v1Deserialized.throttleTimeMs(), "Throttle time must be 10");
    assertEquals(tpResponseData, v0Deserialized.responseData(topicNames, (short) 0), "Response data does not match");
    assertEquals(tpResponseData, v1Deserialized.responseData(topicNames, (short) 1), "Response data does not match");
    LinkedHashMap<TopicIdPartition, FetchResponseData.PartitionData> idResponseData = new LinkedHashMap<>();
    idResponseData.put(new TopicIdPartition(id, new TopicPartition("test", 0)),
            new FetchResponseData.PartitionData()
                    .setPartitionIndex(0)
                    .setHighWatermark(1000000)
                    .setLogStartOffset(-1)
                    .setRecords(records));
    FetchResponse idTestResponse = FetchResponse.of(Errors.NONE, 0, INVALID_SESSION_ID, idResponseData);
    FetchResponse v12Deserialized = FetchResponse.parse(idTestResponse.serialize((short) 12), (short) 12);
    FetchResponse newestDeserialized = FetchResponse.parse(idTestResponse.serialize(FETCH.latestVersion()), FETCH.latestVersion());
    // Topic IDs are only serialized from fetch version 13 onwards (KIP-516),
    // so the v12 round trip drops them while the latest version keeps them.
    assertTrue(v12Deserialized.topicIds().isEmpty());
    assertEquals(1, newestDeserialized.topicIds().size());
    assertTrue(newestDeserialized.topicIds().contains(id));
}
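Since the test leans on MemoryRecords.readableRecords, a short sketch of our own (the class name ReadableRecordsSketch is assumed, not Kafka's) shows its behavior: it wraps the given buffer in place without copying, and a 10-byte buffer is too small to hold even a batch header, so iterating it should yield no batches, matching the empty records used above.

import java.nio.ByteBuffer;

import org.apache.kafka.common.record.MemoryRecords;
import org.apache.kafka.common.record.MutableRecordBatch;
import org.apache.kafka.common.record.Record;

public class ReadableRecordsSketch {
    public static void main(String[] args) {
        // Wraps the buffer as-is; nothing is copied or validated eagerly.
        MemoryRecords records = MemoryRecords.readableRecords(ByteBuffer.allocate(10));
        // Ten zero bytes are smaller than the batch log overhead, so the
        // iterator finds no batches and the loop body never runs.
        for (MutableRecordBatch batch : records.batches()) {
            for (Record record : batch)
                System.out.println(record.offset());
        }
    }
}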
Use of org.apache.kafka.common.record.MemoryRecords in the Apache Kafka project.
From the class ProduceRequestTest, method shouldBeFlaggedAsIdempotentWhenIdempotentRecords.
@Test
public void shouldBeFlaggedAsIdempotentWhenIdempotentRecords() {
    final MemoryRecords memoryRecords = MemoryRecords.withIdempotentRecords(1, CompressionType.NONE, 1L,
            (short) 1, 1, 1, simpleRecord);
    final ProduceRequest request = ProduceRequest.forCurrentMagic(new ProduceRequestData()
            .setTopicData(new ProduceRequestData.TopicProduceDataCollection(Collections.singletonList(
                    new ProduceRequestData.TopicProduceData()
                            .setName("topic")
                            .setPartitionData(Collections.singletonList(new ProduceRequestData.PartitionProduceData()
                                    .setIndex(1)
                                    .setRecords(memoryRecords)))).iterator()))
            .setAcks((short) -1)
            .setTimeoutMs(10)).build();
    assertTrue(RequestTestUtils.hasIdempotentRecords(request));
}
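To show what the idempotence check keys on at the batch level, here is a sketch of ours (IdempotentBatchSketch is not a Kafka class): a batch built with withIdempotentRecords carries producer metadata while its transactional flag stays false.

import org.apache.kafka.common.record.CompressionType;
import org.apache.kafka.common.record.MemoryRecords;
import org.apache.kafka.common.record.MutableRecordBatch;
import org.apache.kafka.common.record.SimpleRecord;

public class IdempotentBatchSketch {
    public static void main(String[] args) {
        MemoryRecords records = MemoryRecords.withIdempotentRecords(
                CompressionType.NONE, 1L, (short) 1, 1, new SimpleRecord("woot".getBytes()));
        for (MutableRecordBatch batch : records.batches()) {
            // Producer ID and epoch are set on the batch header; that is what
            // flags the request as idempotent. The transactional bit is not set.
            System.out.println("hasProducerId = " + batch.hasProducerId());     // true
            System.out.println("isTransactional = " + batch.isTransactional()); // false
        }
    }
}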
Use of org.apache.kafka.common.record.MemoryRecords in the Apache Kafka project.
From the class ProduceRequestTest, method testMixedIdempotentData.
@Test
public void testMixedIdempotentData() {
    final long producerId = 15L;
    final short producerEpoch = 5;
    final int sequence = 10;
    final MemoryRecords nonTxnRecords = MemoryRecords.withRecords(CompressionType.NONE,
            new SimpleRecord("foo".getBytes()));
    final MemoryRecords txnRecords = MemoryRecords.withIdempotentRecords(CompressionType.NONE,
            producerId, producerEpoch, sequence, new SimpleRecord("bar".getBytes()));
    ProduceRequest.Builder builder = ProduceRequest.forMagic(RecordVersion.current().value,
            new ProduceRequestData()
                    .setTopicData(new ProduceRequestData.TopicProduceDataCollection(Arrays.asList(
                            new ProduceRequestData.TopicProduceData()
                                    .setName("foo")
                                    .setPartitionData(Collections.singletonList(new ProduceRequestData.PartitionProduceData()
                                            .setIndex(0)
                                            .setRecords(txnRecords))),
                            new ProduceRequestData.TopicProduceData()
                                    .setName("foo")
                                    .setPartitionData(Collections.singletonList(new ProduceRequestData.PartitionProduceData()
                                            .setIndex(1)
                                            .setRecords(nonTxnRecords)))).iterator()))
                    .setAcks((short) -1)
                    .setTimeoutMs(5000));
    final ProduceRequest request = builder.build();
    assertFalse(RequestUtils.hasTransactionalRecords(request));
    assertTrue(RequestTestUtils.hasIdempotentRecords(request));
}
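To make the two assertions concrete, a small sketch of our own (TxnFlagSketch is a hypothetical class name) contrasts withIdempotentRecords with its transactional counterpart withTransactionalRecords; only the latter sets the batch's transactional flag, which is why hasIdempotentRecords is true for the mixed request above while hasTransactionalRecords is false.

import org.apache.kafka.common.record.CompressionType;
import org.apache.kafka.common.record.MemoryRecords;
import org.apache.kafka.common.record.MutableRecordBatch;
import org.apache.kafka.common.record.SimpleRecord;

public class TxnFlagSketch {
    public static void main(String[] args) {
        MemoryRecords idempotent = MemoryRecords.withIdempotentRecords(
                CompressionType.NONE, 15L, (short) 5, 10, new SimpleRecord("bar".getBytes()));
        MemoryRecords transactional = MemoryRecords.withTransactionalRecords(
                CompressionType.NONE, 15L, (short) 5, 10, new SimpleRecord("bar".getBytes()));
        // Both batch types carry producer metadata; only the transactional one
        // has the transactional bit set in its header.
        for (MutableRecordBatch batch : idempotent.batches())
            System.out.println("idempotent: isTransactional = " + batch.isTransactional());    // false
        for (MutableRecordBatch batch : transactional.batches())
            System.out.println("transactional: isTransactional = " + batch.isTransactional()); // true
    }
}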
Use of org.apache.kafka.common.record.MemoryRecords in the Apache Kafka project.
From the class FetcherTest, method testUpdatePositionOnEmptyBatch.
@Test
public void testUpdatePositionOnEmptyBatch() {
    buildFetcher();
    long producerId = 1;
    short producerEpoch = 0;
    int sequence = 1;
    long baseOffset = 37;
    long lastOffset = 54;
    int partitionLeaderEpoch = 7;
    ByteBuffer buffer = ByteBuffer.allocate(DefaultRecordBatch.RECORD_BATCH_OVERHEAD);
    DefaultRecordBatch.writeEmptyHeader(buffer, RecordBatch.CURRENT_MAGIC_VALUE, producerId, producerEpoch,
            sequence, baseOffset, lastOffset, partitionLeaderEpoch, TimestampType.CREATE_TIME,
            System.currentTimeMillis(), false, false);
    buffer.flip();
    MemoryRecords recordsWithEmptyBatch = MemoryRecords.readableRecords(buffer);
    assignFromUser(singleton(tp0));
    subscriptions.seek(tp0, 0);
    assertEquals(1, fetcher.sendFetches());
    client.prepareResponse(fullFetchResponse(tidp0, recordsWithEmptyBatch, Errors.NONE, 100L, 0));
    consumerClient.poll(time.timer(0));
    assertTrue(fetcher.hasCompletedFetches());
    Fetch<byte[], byte[]> fetch = collectFetch();
    assertEquals(emptyMap(), fetch.records());
    assertTrue(fetch.positionAdvanced());
    // The next offset should point to the next batch
    assertEquals(lastOffset + 1, subscriptions.position(tp0).offset);
}
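The empty batch written by writeEmptyHeader keeps its offset range in the header even though the body holds no records; this standalone sketch (class name ours, not from the test suite) prints that metadata, which is what lets the fetcher advance the position to lastOffset + 1.

import java.nio.ByteBuffer;

import org.apache.kafka.common.record.DefaultRecordBatch;
import org.apache.kafka.common.record.MemoryRecords;
import org.apache.kafka.common.record.MutableRecordBatch;
import org.apache.kafka.common.record.RecordBatch;
import org.apache.kafka.common.record.TimestampType;

public class EmptyBatchSketch {
    public static void main(String[] args) {
        ByteBuffer buffer = ByteBuffer.allocate(DefaultRecordBatch.RECORD_BATCH_OVERHEAD);
        // Same header the test writes: offsets 37..54 with an empty body.
        DefaultRecordBatch.writeEmptyHeader(buffer, RecordBatch.CURRENT_MAGIC_VALUE, 1L, (short) 0,
                1, 37L, 54L, 7, TimestampType.CREATE_TIME, System.currentTimeMillis(), false, false);
        buffer.flip();
        MemoryRecords records = MemoryRecords.readableRecords(buffer);
        for (MutableRecordBatch batch : records.batches()) {
            // The offset metadata survives with zero records, so a consumer can
            // safely skip ahead to lastOffset() + 1.
            System.out.println("offsets " + batch.baseOffset() + ".." + batch.lastOffset()
                    + ", records = " + batch.countOrNull());
        }
    }
}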