use of org.apache.kafka.common.record.MemoryRecords in project apache-kafka-on-k8s by banzaicloud.
the class KafkaConsumerTest method fetchResponse.
private FetchResponse fetchResponse(Map<TopicPartition, FetchInfo> fetches) {
    LinkedHashMap<TopicPartition, FetchResponse.PartitionData> tpResponses = new LinkedHashMap<>();
    for (Map.Entry<TopicPartition, FetchInfo> fetchEntry : fetches.entrySet()) {
        TopicPartition partition = fetchEntry.getKey();
        long fetchOffset = fetchEntry.getValue().offset;
        int fetchCount = fetchEntry.getValue().count;
        final MemoryRecords records;
        if (fetchCount == 0) {
            records = MemoryRecords.EMPTY;
        } else {
            MemoryRecordsBuilder builder = MemoryRecords.builder(ByteBuffer.allocate(1024), CompressionType.NONE, TimestampType.CREATE_TIME, fetchOffset);
            for (int i = 0; i < fetchCount; i++)
                builder.append(0L, ("key-" + i).getBytes(), ("value-" + i).getBytes());
            records = builder.build();
        }
        tpResponses.put(partition, new FetchResponse.PartitionData(Errors.NONE, 0, FetchResponse.INVALID_LAST_STABLE_OFFSET, 0L, null, records));
    }
    return new FetchResponse(Errors.NONE, tpResponses, 0, INVALID_SESSION_ID);
}
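For reference, records built through MemoryRecordsBuilder can be read back through the MemoryRecords iteration API. A minimal sketch, assuming the same org.apache.kafka.common.record imports used above (Record in addition to MemoryRecords and MemoryRecordsBuilder):

MemoryRecordsBuilder builder = MemoryRecords.builder(ByteBuffer.allocate(1024), CompressionType.NONE, TimestampType.CREATE_TIME, 0L);
builder.append(0L, "key-0".getBytes(), "value-0".getBytes());
builder.append(0L, "key-1".getBytes(), "value-1".getBytes());
MemoryRecords records = builder.build();

// Each appended record comes back through records(); key() and value() are ByteBuffers.
int count = 0;
for (Record record : records.records())
    count++;
assertEquals(2, count);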
use of org.apache.kafka.common.record.MemoryRecords in project apache-kafka-on-k8s by banzaicloud.
the class ProduceRequestTest method testV3AndAboveShouldContainOnlyOneRecordBatch.
@Test
public void testV3AndAboveShouldContainOnlyOneRecordBatch() {
    ByteBuffer buffer = ByteBuffer.allocate(256);
    MemoryRecordsBuilder builder = MemoryRecords.builder(buffer, CompressionType.NONE, TimestampType.CREATE_TIME, 0L);
    builder.append(10L, null, "a".getBytes());
    builder.close();

    builder = MemoryRecords.builder(buffer, CompressionType.NONE, TimestampType.CREATE_TIME, 1L);
    builder.append(11L, "1".getBytes(), "b".getBytes());
    builder.append(12L, null, "c".getBytes());
    builder.close();

    buffer.flip();

    Map<TopicPartition, MemoryRecords> produceData = new HashMap<>();
    produceData.put(new TopicPartition("test", 0), MemoryRecords.readableRecords(buffer));

    ProduceRequest.Builder requestBuilder = ProduceRequest.Builder.forCurrentMagic((short) 1, 5000, produceData);
    assertThrowsInvalidRecordExceptionForAllVersions(requestBuilder);
}
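assertThrowsInvalidRecordExceptionForAllVersions is a helper defined elsewhere in ProduceRequestTest and is not shown on this page. A plausible sketch of the check it performs, assuming InvalidRecordException from org.apache.kafka.common.record (the body below is an illustration, not the project's exact helper):

private void assertThrowsInvalidRecordExceptionForAllVersions(ProduceRequest.Builder builder) {
    for (short version = builder.oldestAllowedVersion(); version <= builder.latestAllowedVersion(); version++) {
        try {
            // Building validates the records; v3 and above reject payloads with more than one batch.
            builder.build(version);
            fail("Expected InvalidRecordException for version " + version);
        } catch (InvalidRecordException e) {
            // expected
        }
    }
}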
use of org.apache.kafka.common.record.MemoryRecords in project apache-kafka-on-k8s by banzaicloud.
the class ProduceRequestTest method testBuildWithOldMessageFormat.
@Test
public void testBuildWithOldMessageFormat() {
    ByteBuffer buffer = ByteBuffer.allocate(256);
    MemoryRecordsBuilder builder = MemoryRecords.builder(buffer, RecordBatch.MAGIC_VALUE_V1, CompressionType.NONE, TimestampType.CREATE_TIME, 0L);
    builder.append(10L, null, "a".getBytes());

    Map<TopicPartition, MemoryRecords> produceData = new HashMap<>();
    produceData.put(new TopicPartition("test", 0), builder.build());

    ProduceRequest.Builder requestBuilder = ProduceRequest.Builder.forMagic(RecordBatch.MAGIC_VALUE_V1, (short) 1, 5000, produceData, null);
    assertEquals(2, requestBuilder.oldestAllowedVersion());
    assertEquals(2, requestBuilder.latestAllowedVersion());
}
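For contrast, a builder created with forCurrentMagic is not pinned to a single version the way the magic-v1 builder above is. An illustrative follow-on assertion, reusing the same produceData fixture (the exact version numbers depend on the client release):

ProduceRequest.Builder currentBuilder = ProduceRequest.Builder.forCurrentMagic((short) 1, 5000, produceData);
// With the current message format the allowed version range spans more than one version.
assertTrue(currentBuilder.oldestAllowedVersion() < currentBuilder.latestAllowedVersion());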
use of org.apache.kafka.common.record.MemoryRecords in project apache-kafka-on-k8s by banzaicloud.
the class ProduceRequestTest method shouldBeFlaggedAsIdempotentWhenIdempotentRecords.
@Test
public void shouldBeFlaggedAsIdempotentWhenIdempotentRecords() throws Exception {
    final MemoryRecords memoryRecords = MemoryRecords.withIdempotentRecords(1, CompressionType.NONE, 1L, (short) 1, 1, 1, simpleRecord);
    final ProduceRequest request = ProduceRequest.Builder.forCurrentMagic((short) -1, 10,
            Collections.singletonMap(new TopicPartition("topic", 1), memoryRecords)).build();
    assertTrue(request.isIdempotent());
}
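The transactional counterpart follows the same shape. A minimal sketch, assuming simpleRecord is a SimpleRecord fixture defined in the test class and that the parallel withTransactionalRecords overload is used:

final MemoryRecords transactionalRecords = MemoryRecords.withTransactionalRecords(1, CompressionType.NONE, 1L, (short) 1, 1, 1, simpleRecord);
final ProduceRequest transactionalRequest = ProduceRequest.Builder.forCurrentMagic((short) -1, 10,
        Collections.singletonMap(new TopicPartition("topic", 1), transactionalRecords)).build();
// Transactional record batches flag the request as transactional rather than merely idempotent.
assertTrue(transactionalRequest.isTransactional());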
use of org.apache.kafka.common.record.MemoryRecords in project apache-kafka-on-k8s by banzaicloud.
the class RequestResponseTest method createFetchResponse.
private FetchResponse createFetchResponse() {
    LinkedHashMap<TopicPartition, FetchResponse.PartitionData> responseData = new LinkedHashMap<>();
    MemoryRecords records = MemoryRecords.withRecords(CompressionType.NONE, new SimpleRecord("blah".getBytes()));
    responseData.put(new TopicPartition("test", 0), new FetchResponse.PartitionData(Errors.NONE, 1000000,
            FetchResponse.INVALID_LAST_STABLE_OFFSET, 0L, null, records));

    List<FetchResponse.AbortedTransaction> abortedTransactions = Collections.singletonList(
            new FetchResponse.AbortedTransaction(234L, 999L));
    responseData.put(new TopicPartition("test", 1), new FetchResponse.PartitionData(Errors.NONE, 1000000,
            FetchResponse.INVALID_LAST_STABLE_OFFSET, 0L, abortedTransactions, MemoryRecords.EMPTY));

    return new FetchResponse(Errors.NONE, responseData, 25, INVALID_SESSION_ID);
}
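An illustrative caller, assuming the FetchResponse accessors available in this code base (responseData() and throttleTimeMs()):

FetchResponse response = createFetchResponse();
// Two partitions were added above, and 25 is the throttle time passed to the constructor.
assertEquals(2, response.responseData().size());
assertEquals(25, response.throttleTimeMs());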