Use of org.apache.kafka.common.record.MemoryRecords in project apache-kafka-on-k8s by banzaicloud.
From class ProduceRequestTest, method shouldBeFlaggedAsTransactionalWhenTransactionalRecords:
@Test
public void shouldBeFlaggedAsTransactionalWhenTransactionalRecords() throws Exception {
    final MemoryRecords memoryRecords = MemoryRecords.withTransactionalRecords(
            0, CompressionType.NONE, 1L, (short) 1, 1, 1, simpleRecord);
    final ProduceRequest request = ProduceRequest.Builder.forCurrentMagic((short) -1, 10,
            Collections.singletonMap(new TopicPartition("topic", 1), memoryRecords)).build();
    assertTrue(request.isTransactional());
}
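By contrast, plain records carry no transactional metadata, so the flag should stay unset. A minimal sketch of that counterpart, assuming the same fixtures (simpleRecord) and static JUnit imports as the test above; the test name itself is hypothetical:

@Test
public void shouldNotBeFlaggedAsTransactionalWhenPlainRecords() throws Exception {
    // Plain records built without producer id / epoch / transactional flag.
    final MemoryRecords memoryRecords = MemoryRecords.withRecords(CompressionType.NONE, simpleRecord);
    final ProduceRequest request = ProduceRequest.Builder.forCurrentMagic((short) -1, 10,
            Collections.singletonMap(new TopicPartition("topic", 1), memoryRecords)).build();
    assertFalse(request.isTransactional());
}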
Use of org.apache.kafka.common.record.MemoryRecords in project apache-kafka-on-k8s by banzaicloud.
From class RequestResponseTest, method fetchResponseVersionTest:
@Test
public void fetchResponseVersionTest() {
    LinkedHashMap<TopicPartition, FetchResponse.PartitionData> responseData = new LinkedHashMap<>();
    MemoryRecords records = MemoryRecords.readableRecords(ByteBuffer.allocate(10));
    responseData.put(new TopicPartition("test", 0), new FetchResponse.PartitionData(
            Errors.NONE, 1000000, FetchResponse.INVALID_LAST_STABLE_OFFSET, 0L, null, records));
    FetchResponse v0Response = new FetchResponse(Errors.NONE, responseData, 0, INVALID_SESSION_ID);
    FetchResponse v1Response = new FetchResponse(Errors.NONE, responseData, 10, INVALID_SESSION_ID);
    assertEquals("Throttle time must be zero", 0, v0Response.throttleTimeMs());
    assertEquals("Throttle time must be 10", 10, v1Response.throttleTimeMs());
    assertEquals("Should use schema version 0", ApiKeys.FETCH.responseSchema((short) 0),
            v0Response.toStruct((short) 0).schema());
    assertEquals("Should use schema version 1", ApiKeys.FETCH.responseSchema((short) 1),
            v1Response.toStruct((short) 1).schema());
    assertEquals("Response data does not match", responseData, v0Response.responseData());
    assertEquals("Response data does not match", responseData, v1Response.responseData());
}
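The test wraps an empty 10-byte buffer because only the response envelope is under test. If record contents mattered, MemoryRecords can be built directly from SimpleRecord values instead; an illustrative fragment that could sit inside the same test body (keys and values here are made up):

// Readable records with actual content, instead of an empty allocated buffer.
MemoryRecords nonEmpty = MemoryRecords.withRecords(CompressionType.NONE,
        new SimpleRecord("key".getBytes(), "value".getBytes()));
responseData.put(new TopicPartition("test", 1), new FetchResponse.PartitionData(
        Errors.NONE, 1000000, FetchResponse.INVALID_LAST_STABLE_OFFSET, 0L, null, nonEmpty));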
Use of org.apache.kafka.common.record.MemoryRecords in project apache-kafka-on-k8s by banzaicloud.
From class RequestResponseTest, method testFetchResponseV4:
@Test
public void testFetchResponseV4() {
    LinkedHashMap<TopicPartition, FetchResponse.PartitionData> responseData = new LinkedHashMap<>();
    MemoryRecords records = MemoryRecords.readableRecords(ByteBuffer.allocate(10));
    List<FetchResponse.AbortedTransaction> abortedTransactions = asList(
            new FetchResponse.AbortedTransaction(10, 100),
            new FetchResponse.AbortedTransaction(15, 50));
    responseData.put(new TopicPartition("bar", 0), new FetchResponse.PartitionData(
            Errors.NONE, 100000, FetchResponse.INVALID_LAST_STABLE_OFFSET,
            FetchResponse.INVALID_LOG_START_OFFSET, abortedTransactions, records));
    responseData.put(new TopicPartition("bar", 1), new FetchResponse.PartitionData(
            Errors.NONE, 900000, 5, FetchResponse.INVALID_LOG_START_OFFSET, null, records));
    responseData.put(new TopicPartition("foo", 0), new FetchResponse.PartitionData(
            Errors.NONE, 70000, 6, FetchResponse.INVALID_LOG_START_OFFSET,
            Collections.<FetchResponse.AbortedTransaction>emptyList(), records));
    FetchResponse response = new FetchResponse(Errors.NONE, responseData, 10, INVALID_SESSION_ID);
    FetchResponse deserialized = FetchResponse.parse(toBuffer(response.toStruct((short) 4)), (short) 4);
    assertEquals(responseData, deserialized.responseData());
}
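The same serialize/parse round-trip covers the other fields the v4 schema carries. An illustrative extra assertion, not part of the original test, that could be appended inside the test body: the 10 ms throttle time set on the response (throttle_time_ms exists since v1) should also survive the cycle.

// The throttle time set when constructing the response should round-trip through v4.
assertEquals(10, deserialized.throttleTimeMs());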
Use of org.apache.kafka.common.record.MemoryRecords in project apache-kafka-on-k8s by banzaicloud.
From class ProduceRequest, method toStruct:
/**
 * Visible for testing.
 */
@Override
public Struct toStruct() {
    // Store it in a local variable to protect against concurrent updates
    Map<TopicPartition, MemoryRecords> partitionRecords = partitionRecordsOrFail();
    short version = version();
    Struct struct = new Struct(ApiKeys.PRODUCE.requestSchema(version));
    Map<String, Map<Integer, MemoryRecords>> recordsByTopic = CollectionUtils.groupDataByTopic(partitionRecords);
    struct.set(ACKS_KEY_NAME, acks);
    struct.set(TIMEOUT_KEY_NAME, timeout);
    struct.setIfExists(NULLABLE_TRANSACTIONAL_ID, transactionalId);
    List<Struct> topicDatas = new ArrayList<>(recordsByTopic.size());
    for (Map.Entry<String, Map<Integer, MemoryRecords>> topicEntry : recordsByTopic.entrySet()) {
        Struct topicData = struct.instance(TOPIC_DATA_KEY_NAME);
        topicData.set(TOPIC_NAME, topicEntry.getKey());
        List<Struct> partitionArray = new ArrayList<>();
        for (Map.Entry<Integer, MemoryRecords> partitionEntry : topicEntry.getValue().entrySet()) {
            MemoryRecords records = partitionEntry.getValue();
            Struct part = topicData.instance(PARTITION_DATA_KEY_NAME)
                    .set(PARTITION_ID, partitionEntry.getKey())
                    .set(RECORD_SET_KEY_NAME, records);
            partitionArray.add(part);
        }
        topicData.set(PARTITION_DATA_KEY_NAME, partitionArray.toArray());
        topicDatas.add(topicData);
    }
    struct.set(TOPIC_DATA_KEY_NAME, topicDatas.toArray());
    return struct;
}
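Once built, the returned Struct can be serialized with the generic Struct API like any other protocol struct. A caller-side sketch (the request variable is a hypothetical ProduceRequest instance, not code from this class):

Struct struct = request.toStruct();
ByteBuffer buffer = ByteBuffer.allocate(struct.sizeOf());
struct.writeTo(buffer);
buffer.flip(); // buffer now holds the wire-format request body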
Use of org.apache.kafka.common.record.MemoryRecords in project apache-kafka-on-k8s by banzaicloud.
From class ProducerBatch, method split:
public Deque<ProducerBatch> split(int splitBatchSize) {
    Deque<ProducerBatch> batches = new ArrayDeque<>();
    MemoryRecords memoryRecords = recordsBuilder.build();
    Iterator<MutableRecordBatch> recordBatchIter = memoryRecords.batches().iterator();
    if (!recordBatchIter.hasNext())
        throw new IllegalStateException("Cannot split an empty producer batch.");
    RecordBatch recordBatch = recordBatchIter.next();
    if (recordBatch.magic() < MAGIC_VALUE_V2 && !recordBatch.isCompressed())
        throw new IllegalArgumentException("Batch splitting cannot be used with non-compressed messages with version v0 and v1");
    if (recordBatchIter.hasNext())
        throw new IllegalArgumentException("A producer batch should only have one record batch.");
    Iterator<Thunk> thunkIter = thunks.iterator();
    // We always allocate batch size because we are already splitting a big batch,
    // and we also retain the create time of the original batch.
    ProducerBatch batch = null;
    for (Record record : recordBatch) {
        assert thunkIter.hasNext();
        Thunk thunk = thunkIter.next();
        if (batch == null)
            batch = createBatchOffAccumulatorForRecord(record, splitBatchSize);
        // A newly created batch can always host the first message.
        if (!batch.tryAppendForSplit(record.timestamp(), record.key(), record.value(), record.headers(), thunk)) {
            batches.add(batch);
            batch = createBatchOffAccumulatorForRecord(record, splitBatchSize);
            batch.tryAppendForSplit(record.timestamp(), record.key(), record.value(), record.headers(), thunk);
        }
    }
    // Close the last batch and add it to the batch list after split.
    if (batch != null)
        batches.add(batch);
    produceFuture.set(ProduceResponse.INVALID_OFFSET, NO_TIMESTAMP, new RecordBatchTooLargeException());
    produceFuture.done();
    if (hasSequence()) {
        int sequence = baseSequence();
        ProducerIdAndEpoch producerIdAndEpoch = new ProducerIdAndEpoch(producerId(), producerEpoch());
        for (ProducerBatch newBatch : batches) {
            newBatch.setProducerState(producerIdAndEpoch, sequence, isTransactional());
            sequence += newBatch.recordCount;
        }
    }
    return batches;
}
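The sequence bookkeeping at the end is what keeps idempotent delivery intact after a split: each new batch takes over where the previous one left off, so the broker still sees one gap-free sequence range. A self-contained sketch of just that arithmetic, with made-up values (base sequence 100, split into batches of 3 and 2 records):

public class SplitSequenceDemo {
    public static void main(String[] args) {
        int sequence = 100;          // baseSequence() of the original over-large batch
        int[] recordCounts = {3, 2}; // record counts of the batches produced by split()
        for (int recordCount : recordCounts) {
            System.out.println("new batch base sequence: " + sequence);
            sequence += recordCount; // the next batch continues where this one ends
        }
        // Prints 100, then 103: the split batches cover one contiguous sequence range.
    }
}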