Example usage of org.apache.kafka.common.record.MemoryRecords in the Apache Kafka project, from the class RequestResponseTest, method createFetchResponse(boolean).
/**
 * Builds a two-partition fetch response for topic "test" and round-trips it
 * through serialization at the latest FETCH version.
 *
 * @param includeAborted whether partition 1 should carry an aborted-transaction marker
 * @return the response re-parsed from its serialized form
 */
private FetchResponse createFetchResponse(boolean includeAborted) {
    LinkedHashMap<TopicIdPartition, FetchResponseData.PartitionData> partitions = new LinkedHashMap<>();
    Uuid testTopicId = Uuid.randomUuid();
    // Partition 0 carries one record.
    MemoryRecords singleRecord = MemoryRecords.withRecords(CompressionType.NONE, new SimpleRecord("blah".getBytes()));
    FetchResponseData.PartitionData partition0 = new FetchResponseData.PartitionData()
            .setPartitionIndex(0)
            .setHighWatermark(1000000)
            .setLogStartOffset(0)
            .setRecords(singleRecord);
    partitions.put(new TopicIdPartition(testTopicId, new TopicPartition("test", 0)), partition0);
    // Partition 1 carries no records; optionally an aborted transaction.
    List<FetchResponseData.AbortedTransaction> aborted = includeAborted
            ? singletonList(new FetchResponseData.AbortedTransaction().setProducerId(234L).setFirstOffset(999L))
            : emptyList();
    FetchResponseData.PartitionData partition1 = new FetchResponseData.PartitionData()
            .setPartitionIndex(1)
            .setHighWatermark(1000000)
            .setLogStartOffset(0)
            .setAbortedTransactions(aborted);
    partitions.put(new TopicIdPartition(testTopicId, new TopicPartition("test", 1)), partition1);
    // Serialize and immediately re-parse so the test exercises the wire format.
    FetchResponse response = FetchResponse.of(Errors.NONE, 25, INVALID_SESSION_ID, partitions);
    return FetchResponse.parse(response.serialize(FETCH.latestVersion()), FETCH.latestVersion());
}
Example usage of org.apache.kafka.common.record.MemoryRecords in the Apache Kafka project, from the class RequestResponseTest, method createFetchResponse(int).
/**
 * Builds a two-partition fetch response for topic "test" bound to the given
 * session id, then round-trips it through serialization at the latest FETCH version.
 *
 * @param sessionId the fetch session id to embed in the response
 * @return the response re-parsed from its serialized form
 */
private FetchResponse createFetchResponse(int sessionId) {
    LinkedHashMap<TopicIdPartition, FetchResponseData.PartitionData> responseData = new LinkedHashMap<>();
    // Only one topic is involved, so a plain Uuid local is clearer than the
    // one-entry HashMap<String, Uuid> previously used here.
    Uuid topicId = Uuid.randomUuid();
    // Partition 0 carries one record.
    MemoryRecords records = MemoryRecords.withRecords(CompressionType.NONE, new SimpleRecord("blah".getBytes()));
    responseData.put(new TopicIdPartition(topicId, new TopicPartition("test", 0)),
            new FetchResponseData.PartitionData()
                    .setPartitionIndex(0)
                    .setHighWatermark(1000000)
                    .setLogStartOffset(0)
                    .setRecords(records));
    // Partition 1 carries an aborted-transaction marker instead of records.
    List<FetchResponseData.AbortedTransaction> abortedTransactions =
            singletonList(new FetchResponseData.AbortedTransaction().setProducerId(234L).setFirstOffset(999L));
    responseData.put(new TopicIdPartition(topicId, new TopicPartition("test", 1)),
            new FetchResponseData.PartitionData()
                    .setPartitionIndex(1)
                    .setHighWatermark(1000000)
                    .setLogStartOffset(0)
                    .setAbortedTransactions(abortedTransactions));
    // Serialize and re-parse so the wire format is exercised.
    return FetchResponse.parse(
            FetchResponse.of(Errors.NONE, 25, sessionId, responseData).serialize(FETCH.latestVersion()),
            FETCH.latestVersion());
}
Example usage of org.apache.kafka.common.record.MemoryRecords in the Apache Kafka project, from the class RequestResponseTest, method createProduceRequest.
/**
 * Builds a ProduceRequest appropriate for the given protocol version.
 * Versions below 2 get a minimal request; later versions use the record
 * magic matching the version (v2 -> MAGIC_VALUE_V1, otherwise MAGIC_VALUE_V2)
 * and include a transactional id from version 3 onward.
 *
 * @param version the produce request version to build
 * @return the built request
 */
private ProduceRequest createProduceRequest(short version) {
    if (version < 2) {
        // Old-style request: single record to topic1/partition 1, acks=-1, 123ms timeout.
        MemoryRecords oldRecords = MemoryRecords.withRecords(CompressionType.NONE, new SimpleRecord("blah".getBytes()));
        ProduceRequestData.PartitionProduceData partitionData = new ProduceRequestData.PartitionProduceData()
                .setIndex(1)
                .setRecords(oldRecords);
        ProduceRequestData.TopicProduceData topicData = new ProduceRequestData.TopicProduceData()
                .setName("topic1")
                .setPartitionData(singletonList(partitionData));
        ProduceRequestData data = new ProduceRequestData()
                .setAcks((short) -1)
                .setTimeoutMs(123)
                .setTopicData(new ProduceRequestData.TopicProduceDataCollection(singletonList(topicData).iterator()));
        return new ProduceRequest.Builder(version, version, data).build(version);
    }
    // Record batch magic depends on the request version.
    byte magic = version == 2 ? RecordBatch.MAGIC_VALUE_V1 : RecordBatch.MAGIC_VALUE_V2;
    MemoryRecords newRecords = MemoryRecords.withRecords(magic, CompressionType.NONE, new SimpleRecord("woot".getBytes()));
    ProduceRequestData.PartitionProduceData partitionData = new ProduceRequestData.PartitionProduceData()
            .setIndex(0)
            .setRecords(newRecords);
    ProduceRequestData.TopicProduceData topicData = new ProduceRequestData.TopicProduceData()
            .setName("test")
            .setPartitionData(singletonList(partitionData));
    ProduceRequestData data = new ProduceRequestData()
            .setTopicData(new ProduceRequestData.TopicProduceDataCollection(singletonList(topicData).iterator()))
            .setAcks((short) 1)
            .setTimeoutMs(5000)
            // Transactional ids only exist from version 3 onward.
            .setTransactionalId(version >= 3 ? "transactionalId" : null);
    return ProduceRequest.forMagic(magic, data).build(version);
}
Example usage of org.apache.kafka.common.record.MemoryRecords in the Apache Kafka project, from the class RequestResponseTest, test method testFetchResponseV4.
/**
 * Verifies that a fetch response serialized and re-parsed at version 4
 * (which predates topic ids on the wire) yields the same per-partition data.
 */
@Test
public void testFetchResponseV4() {
    LinkedHashMap<TopicIdPartition, FetchResponseData.PartitionData> partitions = new LinkedHashMap<>();
    // Topic-id-to-name mapping used when reading back the deserialized response.
    Map<Uuid, String> idToName = new HashMap<>();
    idToName.put(Uuid.randomUuid(), "bar");
    idToName.put(Uuid.randomUuid(), "foo");
    MemoryRecords emptyRecords = MemoryRecords.readableRecords(ByteBuffer.allocate(10));
    List<FetchResponseData.AbortedTransaction> aborted = asList(
            new FetchResponseData.AbortedTransaction().setProducerId(10).setFirstOffset(100),
            new FetchResponseData.AbortedTransaction().setProducerId(15).setFirstOffset(50));
    // Use zero UUID since this is an old request version.
    partitions.put(new TopicIdPartition(Uuid.ZERO_UUID, new TopicPartition("bar", 0)),
            new FetchResponseData.PartitionData()
                    .setPartitionIndex(0)
                    .setHighWatermark(1000000)
                    .setAbortedTransactions(aborted)
                    .setRecords(emptyRecords));
    partitions.put(new TopicIdPartition(Uuid.ZERO_UUID, new TopicPartition("bar", 1)),
            new FetchResponseData.PartitionData()
                    .setPartitionIndex(1)
                    .setHighWatermark(900000)
                    .setLastStableOffset(5)
                    .setRecords(emptyRecords));
    partitions.put(new TopicIdPartition(Uuid.ZERO_UUID, new TopicPartition("foo", 0)),
            new FetchResponseData.PartitionData()
                    .setPartitionIndex(0)
                    .setHighWatermark(70000)
                    .setLastStableOffset(6)
                    .setRecords(emptyRecords));
    FetchResponse original = FetchResponse.of(Errors.NONE, 10, INVALID_SESSION_ID, partitions);
    FetchResponse roundTripped = FetchResponse.parse(original.serialize((short) 4), (short) 4);
    // Compare keyed by TopicPartition since v4 responses carry no topic ids.
    Map<TopicPartition, FetchResponseData.PartitionData> expected = partitions.entrySet().stream()
            .collect(Collectors.toMap(e -> e.getKey().topicPartition(), Map.Entry::getValue));
    assertEquals(expected, roundTripped.responseData(idToName, (short) 4));
}
Example usage of org.apache.kafka.common.record.MemoryRecords in the Apache Kafka project, from the class RequestResponseTest, method createFetchResponse(short).
/**
 * Builds a FetchResponse populated with every field supported by the given
 * protocol version, so serialization tests cover version-gated fields.
 *
 * @param version the fetch response version to target
 * @return a response whose fields are set according to {@code version}
 */
private FetchResponse createFetchResponse(short version) {
    FetchResponseData data = new FetchResponseData();
    if (version > 0) {
        data.setThrottleTimeMs(345);
    }
    if (version > 6) {
        data.setErrorCode(Errors.NONE.code()).setSessionId(123);
    }
    MemoryRecords records = MemoryRecords.withRecords(CompressionType.NONE, new SimpleRecord("blah".getBytes()));
    FetchResponseData.PartitionData partition = new FetchResponseData.PartitionData()
            .setPartitionIndex(0)
            .setErrorCode(Errors.NONE.code())
            .setHighWatermark(123L)
            .setRecords(records);
    if (version > 3) {
        partition.setLastStableOffset(234L);
    }
    if (version > 4) {
        partition.setLogStartOffset(456L);
    }
    if (version > 10) {
        partition.setPreferredReadReplica(1);
    }
    if (version > 11) {
        // BUG FIX: the snapshot id previously called setEndOffset twice
        // (setEndOffset(1L).setEndOffset(2)), leaving the epoch at its default
        // and overwriting the end offset. It should set end offset 1 and epoch 2.
        partition.setDivergingEpoch(new FetchResponseData.EpochEndOffset().setEndOffset(1L).setEpoch(2))
                .setSnapshotId(new FetchResponseData.SnapshotId().setEndOffset(1L).setEpoch(2))
                .setCurrentLeader(new FetchResponseData.LeaderIdAndEpoch().setLeaderEpoch(1).setLeaderId(2));
    }
    FetchResponseData.FetchableTopicResponse response = new FetchResponseData.FetchableTopicResponse()
            .setTopic("topic")
            .setPartitions(singletonList(partition));
    if (version > 12) {
        // Topic ids were introduced on the wire in version 13.
        response.setTopicId(Uuid.randomUuid());
    }
    data.setResponses(singletonList(response));
    return new FetchResponse(data);
}
Aggregations