Example usage of org.apache.kafka.common.message.ProduceRequestData in the Apache Kafka project: class ProduceRequestTest, method testBuildWithCurrentMessageFormat.
@Test
public void testBuildWithCurrentMessageFormat() {
    // Build a single-record memory-records batch using the current (latest) magic value.
    ByteBuffer buffer = ByteBuffer.allocate(256);
    MemoryRecordsBuilder builder = MemoryRecords.builder(buffer, RecordBatch.CURRENT_MAGIC_VALUE,
        CompressionType.NONE, TimestampType.CREATE_TIME, 0L);
    builder.append(10L, null, "a".getBytes());

    // Assemble the request payload piece by piece instead of one chained expression.
    ProduceRequestData.PartitionProduceData partitionData = new ProduceRequestData.PartitionProduceData()
        .setIndex(9)
        .setRecords(builder.build());
    ProduceRequestData.TopicProduceData topicData = new ProduceRequestData.TopicProduceData()
        .setName("test")
        .setPartitionData(Collections.singletonList(partitionData));
    ProduceRequestData requestData = new ProduceRequestData()
        .setTopicData(new ProduceRequestData.TopicProduceDataCollection(
            Collections.singletonList(topicData).iterator()))
        .setAcks((short) 1)
        .setTimeoutMs(5000);

    ProduceRequest.Builder requestBuilder = ProduceRequest.forMagic(RecordBatch.CURRENT_MAGIC_VALUE, requestData);

    // The current magic value admits the full supported version range: [3, latest].
    assertEquals(3, requestBuilder.oldestAllowedVersion());
    assertEquals(ApiKeys.PRODUCE.latestVersion(), requestBuilder.latestAllowedVersion());
}
Example usage of org.apache.kafka.common.message.ProduceRequestData in the Apache Kafka project: class RequestResponseTest, method createProduceRequest.
/**
 * Builds a ProduceRequest suitable for the given protocol version.
 *
 * <p>Versions below 2 are constructed directly through the Builder with a "blah"
 * record; newer versions go through {@code forMagic} with the magic value implied
 * by the version (V1 for version 2, V2 otherwise) and, from version 3 on, a
 * transactional id.
 */
private ProduceRequest createProduceRequest(short version) {
    if (version < 2) {
        MemoryRecords records = MemoryRecords.withRecords(CompressionType.NONE, new SimpleRecord("blah".getBytes()));
        ProduceRequestData.TopicProduceData topicData = new ProduceRequestData.TopicProduceData()
            .setName("topic1")
            .setPartitionData(singletonList(new ProduceRequestData.PartitionProduceData()
                .setIndex(1)
                .setRecords(records)));
        ProduceRequestData data = new ProduceRequestData()
            .setAcks((short) -1)
            .setTimeoutMs(123)
            .setTopicData(new ProduceRequestData.TopicProduceDataCollection(singletonList(topicData).iterator()));
        return new ProduceRequest.Builder(version, version, data).build(version);
    }

    // Version 2 still uses magic V1; everything newer uses V2.
    byte magic = version == 2 ? RecordBatch.MAGIC_VALUE_V1 : RecordBatch.MAGIC_VALUE_V2;
    MemoryRecords records = MemoryRecords.withRecords(magic, CompressionType.NONE, new SimpleRecord("woot".getBytes()));
    ProduceRequestData.TopicProduceData topicData = new ProduceRequestData.TopicProduceData()
        .setName("test")
        .setPartitionData(singletonList(new ProduceRequestData.PartitionProduceData()
            .setIndex(0)
            .setRecords(records)));
    ProduceRequestData data = new ProduceRequestData()
        .setTopicData(new ProduceRequestData.TopicProduceDataCollection(singletonList(topicData).iterator()))
        .setAcks((short) 1)
        .setTimeoutMs(5000)
        .setTransactionalId(version >= 3 ? "transactionalId" : null);
    return ProduceRequest.forMagic(magic, data).build(version);
}
Example usage of org.apache.kafka.common.message.ProduceRequestData in the Apache Kafka project: class Sender, method sendProduceRequest.
/**
 * Creates a produce request from the given record batches and sends it to the
 * broker identified by {@code destination}.
 *
 * <p>All batches are folded into a single {@link ProduceRequestData}; batches whose
 * magic is newer than the minimum usable magic are down-converted first so the whole
 * request can use one record format.
 */
private void sendProduceRequest(long now, int destination, short acks, int timeout, List<ProducerBatch> batches) {
    if (batches.isEmpty())
        return;

    final Map<TopicPartition, ProducerBatch> recordsByPartition = new HashMap<>(batches.size());

    // Pick the smallest magic across all batches, bounded above by what the
    // broker connection supports, so a single request format fits every batch.
    byte minUsedMagic = apiVersions.maxUsableProduceMagic();
    for (ProducerBatch batch : batches)
        minUsedMagic = (byte) Math.min(minUsedMagic, batch.magic());

    ProduceRequestData.TopicProduceDataCollection tpd = new ProduceRequestData.TopicProduceDataCollection();
    for (ProducerBatch batch : batches) {
        TopicPartition tp = batch.topicPartition;
        MemoryRecords records = batch.records();

        // Down-convert when the batch was built with a newer magic than the request
        // will use — e.g. the partition leader moved from a broker that supports the
        // new magic version to one that does not.
        if (!records.hasMatchingMagic(minUsedMagic))
            records = batch.records().downConvert(minUsedMagic, 0, time).records();

        ProduceRequestData.TopicProduceData topicData = tpd.find(tp.topic());
        if (topicData == null) {
            topicData = new ProduceRequestData.TopicProduceData().setName(tp.topic());
            tpd.add(topicData);
        }
        topicData.partitionData().add(new ProduceRequestData.PartitionProduceData()
            .setIndex(tp.partition())
            .setRecords(records));
        recordsByPartition.put(tp, batch);
    }

    // Only attach a transactional id when the producer is actually transactional.
    String transactionalId = (transactionManager != null && transactionManager.isTransactional())
        ? transactionManager.transactionalId()
        : null;

    ProduceRequestData requestData = new ProduceRequestData()
        .setAcks(acks)
        .setTimeoutMs(timeout)
        .setTransactionalId(transactionalId)
        .setTopicData(tpd);
    ProduceRequest.Builder requestBuilder = ProduceRequest.forMagic(minUsedMagic, requestData);

    RequestCompletionHandler callback = response ->
        handleProduceResponse(response, recordsByPartition, time.milliseconds());

    String nodeId = Integer.toString(destination);
    ClientRequest clientRequest = client.newClientRequest(nodeId, requestBuilder, now,
        acks != 0, requestTimeoutMs, callback);
    client.send(clientRequest, now);
    log.trace("Sent produce request to {}: {}", nodeId, requestBuilder);
}
Example usage of org.apache.kafka.common.message.ProduceRequestData in the Apache Kafka project: class RequestResponseTest, method testProduceRequestPartitionSize.
@Test
public void testProduceRequestPartitionSize() {
    TopicPartition tp0 = new TopicPartition("test", 0);
    TopicPartition tp1 = new TopicPartition("test", 1);

    // Two partitions with differently-sized record sets (one vs. two records).
    MemoryRecords records0 = MemoryRecords.withRecords(RecordBatch.MAGIC_VALUE_V2, CompressionType.NONE,
        new SimpleRecord("woot".getBytes()));
    MemoryRecords records1 = MemoryRecords.withRecords(RecordBatch.MAGIC_VALUE_V2, CompressionType.NONE,
        new SimpleRecord("woot".getBytes()), new SimpleRecord("woot".getBytes()));

    ProduceRequestData.TopicProduceData topic0 = new ProduceRequestData.TopicProduceData()
        .setName(tp0.topic())
        .setPartitionData(singletonList(new ProduceRequestData.PartitionProduceData()
            .setIndex(tp0.partition())
            .setRecords(records0)));
    ProduceRequestData.TopicProduceData topic1 = new ProduceRequestData.TopicProduceData()
        .setName(tp1.topic())
        .setPartitionData(singletonList(new ProduceRequestData.PartitionProduceData()
            .setIndex(tp1.partition())
            .setRecords(records1)));
    ProduceRequestData data = new ProduceRequestData()
        .setTopicData(new ProduceRequestData.TopicProduceDataCollection(
            asList(topic0, topic1).iterator()))
        .setAcks((short) 1)
        .setTimeoutMs(5000)
        .setTransactionalId("transactionalId");

    ProduceRequest request = ProduceRequest.forMagic(RecordBatch.MAGIC_VALUE_V2, data).build((short) 3);

    // partitionSizes() should report each partition's serialized record size.
    assertEquals(2, request.partitionSizes().size());
    assertEquals(records0.sizeInBytes(), (int) request.partitionSizes().get(tp0));
    assertEquals(records1.sizeInBytes(), (int) request.partitionSizes().get(tp1));
}
Example usage of org.apache.kafka.common.message.ProduceRequestData in the Apache Kafka project: class ProduceRequestTest, method testV6AndBelowCannotUseZStdCompression.
@Test
public void testV6AndBelowCannotUseZStdCompression() {
    // Build a single ZSTD-compressed record batch.
    ByteBuffer buffer = ByteBuffer.allocate(256);
    MemoryRecordsBuilder builder = MemoryRecords.builder(buffer, RecordBatch.MAGIC_VALUE_V2,
        CompressionType.ZSTD, TimestampType.CREATE_TIME, 0L);
    builder.append(10L, null, "a".getBytes());

    ProduceRequestData.TopicProduceData topicData = new ProduceRequestData.TopicProduceData()
        .setName("test")
        .setPartitionData(Collections.singletonList(new ProduceRequestData.PartitionProduceData()
            .setIndex(0)
            .setRecords(builder.build())));
    ProduceRequestData produceData = new ProduceRequestData()
        .setTopicData(new ProduceRequestData.TopicProduceDataCollection(
            Collections.singletonList(topicData).iterator()))
        .setAcks((short) 1)
        .setTimeoutMs(1000);

    // Can't create ProduceRequest instance with version within [3, 7)
    for (short version = 3; version < 7; version++) {
        ProduceRequest.Builder requestBuilder = new ProduceRequest.Builder(version, version, produceData);
        assertThrowsForAllVersions(requestBuilder, UnsupportedCompressionTypeException.class);
    }

    // Works fine with current version (>= 7)
    ProduceRequest.forCurrentMagic(produceData);
}
Aggregations