Search in sources :

Example 6 with ProduceRequestData

use of org.apache.kafka.common.message.ProduceRequestData in project kafka by apache.

In the class ProduceRequestTest, the method testBuildWithCurrentMessageFormat:

@Test
public void testBuildWithCurrentMessageFormat() {
    ByteBuffer buffer = ByteBuffer.allocate(256);
    MemoryRecordsBuilder builder = MemoryRecords.builder(buffer, RecordBatch.CURRENT_MAGIC_VALUE, CompressionType.NONE, TimestampType.CREATE_TIME, 0L);
    builder.append(10L, null, "a".getBytes());
    // Assemble the request payload step by step rather than as one chained expression.
    ProduceRequestData.PartitionProduceData partitionData = new ProduceRequestData.PartitionProduceData()
        .setIndex(9)
        .setRecords(builder.build());
    ProduceRequestData.TopicProduceData topicData = new ProduceRequestData.TopicProduceData()
        .setName("test")
        .setPartitionData(Collections.singletonList(partitionData));
    ProduceRequestData requestData = new ProduceRequestData()
        .setTopicData(new ProduceRequestData.TopicProduceDataCollection(
            Collections.singletonList(topicData).iterator()))
        .setAcks((short) 1)
        .setTimeoutMs(5000);
    ProduceRequest.Builder requestBuilder = ProduceRequest.forMagic(RecordBatch.CURRENT_MAGIC_VALUE, requestData);
    // A builder created for the current magic should allow versions 3 through the latest.
    assertEquals(3, requestBuilder.oldestAllowedVersion());
    assertEquals(ApiKeys.PRODUCE.latestVersion(), requestBuilder.latestAllowedVersion());
}
Also used : ProduceRequestData(org.apache.kafka.common.message.ProduceRequestData) MemoryRecordsBuilder(org.apache.kafka.common.record.MemoryRecordsBuilder) ByteBuffer(java.nio.ByteBuffer) Test(org.junit.jupiter.api.Test)

Example 7 with ProduceRequestData

use of org.apache.kafka.common.message.ProduceRequestData in project kafka by apache.

In the class RequestResponseTest, the method createProduceRequest:

/**
 * Builds a {@link ProduceRequest} appropriate for the given protocol version,
 * selecting the record magic value that version supports.
 */
private ProduceRequest createProduceRequest(short version) {
    if (version < 2) {
        // Legacy versions (0-1): construct the builder directly with a fixed version range.
        MemoryRecords records = MemoryRecords.withRecords(CompressionType.NONE, new SimpleRecord("blah".getBytes()));
        ProduceRequestData data = new ProduceRequestData()
            .setAcks((short) -1)
            .setTimeoutMs(123)
            .setTopicData(new ProduceRequestData.TopicProduceDataCollection(singletonList(
                new ProduceRequestData.TopicProduceData()
                    .setName("topic1")
                    .setPartitionData(singletonList(
                        new ProduceRequestData.PartitionProduceData()
                            .setIndex(1)
                            .setRecords(records)))).iterator()));
        return new ProduceRequest.Builder(version, version, data).build(version);
    }
    // Version 2 still carries magic v1 record batches; later versions use magic v2.
    byte magic = (version == 2) ? RecordBatch.MAGIC_VALUE_V1 : RecordBatch.MAGIC_VALUE_V2;
    MemoryRecords records = MemoryRecords.withRecords(magic, CompressionType.NONE, new SimpleRecord("woot".getBytes()));
    // Transactional ids are only supported from version 3 onwards.
    String transactionalId = (version >= 3) ? "transactionalId" : null;
    ProduceRequestData data = new ProduceRequestData()
        .setTopicData(new ProduceRequestData.TopicProduceDataCollection(singletonList(
            new ProduceRequestData.TopicProduceData()
                .setName("test")
                .setPartitionData(singletonList(
                    new ProduceRequestData.PartitionProduceData()
                        .setIndex(0)
                        .setRecords(records)))).iterator()))
        .setAcks((short) 1)
        .setTimeoutMs(5000)
        .setTransactionalId(transactionalId);
    return ProduceRequest.forMagic(magic, data).build(version);
}
Also used : ProduceRequestData(org.apache.kafka.common.message.ProduceRequestData) SimpleRecord(org.apache.kafka.common.record.SimpleRecord) MemoryRecords(org.apache.kafka.common.record.MemoryRecords)

Example 8 with ProduceRequestData

use of org.apache.kafka.common.message.ProduceRequestData in project kafka by apache.

In the class Sender, the method sendProduceRequest:

/**
 * Creates and sends a single produce request to the given broker for the
 * supplied record batches.
 *
 * All batches are folded into one {@link ProduceRequestData}, down-converting
 * records where necessary so the whole request uses a single magic version,
 * and the request is handed to the network client with a completion callback
 * that routes the response back to the per-partition batches.
 *
 * @param now         current time in milliseconds, used for request bookkeeping
 * @param destination broker id the request is sent to
 * @param acks        the acks setting to use for the request
 * @param timeout     request timeout in milliseconds forwarded to the broker
 * @param batches     record batches to include; no-op if empty
 */
private void sendProduceRequest(long now, int destination, short acks, int timeout, List<ProducerBatch> batches) {
    if (batches.isEmpty())
        return;
    // Index batches by partition so the response callback can correlate
    // per-partition results back to their originating batches.
    final Map<TopicPartition, ProducerBatch> recordsByPartition = new HashMap<>(batches.size());
    // Find the minimum magic version across the batches: the request must use a
    // magic no newer than any batch in it, starting from the max the broker supports.
    byte minUsedMagic = apiVersions.maxUsableProduceMagic();
    for (ProducerBatch batch : batches) {
        if (batch.magic() < minUsedMagic)
            minUsedMagic = batch.magic();
    }
    ProduceRequestData.TopicProduceDataCollection tpd = new ProduceRequestData.TopicProduceDataCollection();
    for (ProducerBatch batch : batches) {
        TopicPartition tp = batch.topicPartition;
        MemoryRecords records = batch.records();
        // Down-convert if necessary: e.g. if a partition moved from a broker
        // which is supporting the new magic version to one which doesn't, then we will need to convert.
        if (!records.hasMatchingMagic(minUsedMagic))
            records = batch.records().downConvert(minUsedMagic, 0, time).records();
        // Group partitions under their topic entry, creating the entry on first use.
        ProduceRequestData.TopicProduceData tpData = tpd.find(tp.topic());
        if (tpData == null) {
            tpData = new ProduceRequestData.TopicProduceData().setName(tp.topic());
            tpd.add(tpData);
        }
        tpData.partitionData().add(new ProduceRequestData.PartitionProduceData().setIndex(tp.partition()).setRecords(records));
        recordsByPartition.put(tp, batch);
    }
    // Attach the transactional id only when an active transactional producer is configured.
    String transactionalId = null;
    if (transactionManager != null && transactionManager.isTransactional()) {
        transactionalId = transactionManager.transactionalId();
    }
    ProduceRequest.Builder requestBuilder = ProduceRequest.forMagic(minUsedMagic, new ProduceRequestData().setAcks(acks).setTimeoutMs(timeout).setTransactionalId(transactionalId).setTopicData(tpd));
    RequestCompletionHandler callback = response -> handleProduceResponse(response, recordsByPartition, time.milliseconds());
    String nodeId = Integer.toString(destination);
    // With acks == 0 no response is expected, so the request is fire-and-forget.
    ClientRequest clientRequest = client.newClientRequest(nodeId, requestBuilder, now, acks != 0, requestTimeoutMs, callback);
    client.send(clientRequest, now);
    log.trace("Sent produce request to {}: {}", nodeId, requestBuilder);
}
Also used : Max(org.apache.kafka.common.metrics.stats.Max) TransactionAbortedException(org.apache.kafka.common.errors.TransactionAbortedException) ProduceRequest(org.apache.kafka.common.requests.ProduceRequest) Metadata(org.apache.kafka.clients.Metadata) KafkaException(org.apache.kafka.common.KafkaException) HashMap(java.util.HashMap) RetriableException(org.apache.kafka.common.errors.RetriableException) AbstractRequest(org.apache.kafka.common.requests.AbstractRequest) ClusterAuthorizationException(org.apache.kafka.common.errors.ClusterAuthorizationException) Function(java.util.function.Function) ClientRequest(org.apache.kafka.clients.ClientRequest) InvalidRecordException(org.apache.kafka.common.InvalidRecordException) ArrayList(java.util.ArrayList) Cluster(org.apache.kafka.common.Cluster) RequestHeader(org.apache.kafka.common.requests.RequestHeader) FindCoordinatorRequest(org.apache.kafka.common.requests.FindCoordinatorRequest) InvalidMetadataException(org.apache.kafka.common.errors.InvalidMetadataException) KafkaClient(org.apache.kafka.clients.KafkaClient) RecordBatch(org.apache.kafka.common.record.RecordBatch) LogContext(org.apache.kafka.common.utils.LogContext) Map(java.util.Map) MetricName(org.apache.kafka.common.MetricName) ProduceRequestData(org.apache.kafka.common.message.ProduceRequestData) ProduceResponse(org.apache.kafka.common.requests.ProduceResponse) TopicPartition(org.apache.kafka.common.TopicPartition) Sensor(org.apache.kafka.common.metrics.Sensor) TimeoutException(org.apache.kafka.common.errors.TimeoutException) Logger(org.slf4j.Logger) Time(org.apache.kafka.common.utils.Time) Iterator(java.util.Iterator) IOException(java.io.IOException) ApiVersions(org.apache.kafka.clients.ApiVersions) Collectors(java.util.stream.Collectors) Objects(java.util.Objects) MemoryRecords(org.apache.kafka.common.record.MemoryRecords) List(java.util.List) NetworkClientUtils(org.apache.kafka.clients.NetworkClientUtils) 
RequestCompletionHandler(org.apache.kafka.clients.RequestCompletionHandler) Avg(org.apache.kafka.common.metrics.stats.Avg) TopicAuthorizationException(org.apache.kafka.common.errors.TopicAuthorizationException) Errors(org.apache.kafka.common.protocol.Errors) Node(org.apache.kafka.common.Node) UnknownTopicOrPartitionException(org.apache.kafka.common.errors.UnknownTopicOrPartitionException) Meter(org.apache.kafka.common.metrics.stats.Meter) Collections(java.util.Collections) ClientResponse(org.apache.kafka.clients.ClientResponse) AuthenticationException(org.apache.kafka.common.errors.AuthenticationException) HashMap(java.util.HashMap) ProduceRequest(org.apache.kafka.common.requests.ProduceRequest) ProduceRequestData(org.apache.kafka.common.message.ProduceRequestData) RequestCompletionHandler(org.apache.kafka.clients.RequestCompletionHandler) TopicPartition(org.apache.kafka.common.TopicPartition) ClientRequest(org.apache.kafka.clients.ClientRequest) MemoryRecords(org.apache.kafka.common.record.MemoryRecords)

Example 9 with ProduceRequestData

use of org.apache.kafka.common.message.ProduceRequestData in project kafka by apache.

In the class RequestResponseTest, the method testProduceRequestPartitionSize:

@Test
public void testProduceRequestPartitionSize() {
    TopicPartition tp0 = new TopicPartition("test", 0);
    TopicPartition tp1 = new TopicPartition("test", 1);
    MemoryRecords records0 = MemoryRecords.withRecords(RecordBatch.MAGIC_VALUE_V2, CompressionType.NONE, new SimpleRecord("woot".getBytes()));
    MemoryRecords records1 = MemoryRecords.withRecords(RecordBatch.MAGIC_VALUE_V2, CompressionType.NONE, new SimpleRecord("woot".getBytes()), new SimpleRecord("woot".getBytes()));
    // One topic entry per partition, each carrying its own record set.
    ProduceRequestData.TopicProduceData topic0 = new ProduceRequestData.TopicProduceData()
        .setName(tp0.topic())
        .setPartitionData(singletonList(new ProduceRequestData.PartitionProduceData()
            .setIndex(tp0.partition())
            .setRecords(records0)));
    ProduceRequestData.TopicProduceData topic1 = new ProduceRequestData.TopicProduceData()
        .setName(tp1.topic())
        .setPartitionData(singletonList(new ProduceRequestData.PartitionProduceData()
            .setIndex(tp1.partition())
            .setRecords(records1)));
    ProduceRequestData requestData = new ProduceRequestData()
        .setTopicData(new ProduceRequestData.TopicProduceDataCollection(asList(topic0, topic1).iterator()))
        .setAcks((short) 1)
        .setTimeoutMs(5000)
        .setTransactionalId("transactionalId");
    ProduceRequest request = ProduceRequest.forMagic(RecordBatch.MAGIC_VALUE_V2, requestData).build((short) 3);
    // partitionSizes() should report the serialized record size of each partition.
    assertEquals(2, request.partitionSizes().size());
    assertEquals(records0.sizeInBytes(), (int) request.partitionSizes().get(tp0));
    assertEquals(records1.sizeInBytes(), (int) request.partitionSizes().get(tp1));
}
Also used : TopicPartition(org.apache.kafka.common.TopicPartition) ProduceRequestData(org.apache.kafka.common.message.ProduceRequestData) SimpleRecord(org.apache.kafka.common.record.SimpleRecord) MemoryRecords(org.apache.kafka.common.record.MemoryRecords) Test(org.junit.jupiter.api.Test)

Example 10 with ProduceRequestData

use of org.apache.kafka.common.message.ProduceRequestData in project kafka by apache.

In the class ProduceRequestTest, the method testV6AndBelowCannotUseZStdCompression:

@Test
public void testV6AndBelowCannotUseZStdCompression() {
    ByteBuffer buffer = ByteBuffer.allocate(256);
    MemoryRecordsBuilder builder = MemoryRecords.builder(buffer, RecordBatch.MAGIC_VALUE_V2, CompressionType.ZSTD, TimestampType.CREATE_TIME, 0L);
    builder.append(10L, null, "a".getBytes());
    ProduceRequestData.PartitionProduceData partitionData = new ProduceRequestData.PartitionProduceData()
        .setIndex(0)
        .setRecords(builder.build());
    ProduceRequestData.TopicProduceData topicData = new ProduceRequestData.TopicProduceData()
        .setName("test")
        .setPartitionData(Collections.singletonList(partitionData));
    ProduceRequestData produceData = new ProduceRequestData()
        .setTopicData(new ProduceRequestData.TopicProduceDataCollection(
            Collections.singletonList(topicData).iterator()))
        .setAcks((short) 1)
        .setTimeoutMs(1000);
    // ZStandard records must be rejected for every request version in [3, 7).
    for (short version = 3; version < 7; version++) {
        ProduceRequest.Builder requestBuilder = new ProduceRequest.Builder(version, version, produceData);
        assertThrowsForAllVersions(requestBuilder, UnsupportedCompressionTypeException.class);
    }
    // The same payload is accepted once the version reaches 7 or above.
    ProduceRequest.forCurrentMagic(produceData);
}
Also used : ProduceRequestData(org.apache.kafka.common.message.ProduceRequestData) MemoryRecordsBuilder(org.apache.kafka.common.record.MemoryRecordsBuilder) ByteBuffer(java.nio.ByteBuffer) Test(org.junit.jupiter.api.Test)

Aggregations

ProduceRequestData (org.apache.kafka.common.message.ProduceRequestData)16 Test (org.junit.jupiter.api.Test)13 ByteBuffer (java.nio.ByteBuffer)9 MemoryRecords (org.apache.kafka.common.record.MemoryRecords)7 MemoryRecordsBuilder (org.apache.kafka.common.record.MemoryRecordsBuilder)6 SimpleRecord (org.apache.kafka.common.record.SimpleRecord)4 ProduceRequest (org.apache.kafka.common.requests.ProduceRequest)4 ProduceResponse (org.apache.kafka.common.requests.ProduceResponse)4 ApiVersions (org.apache.kafka.clients.ApiVersions)2 ClientRequest (org.apache.kafka.clients.ClientRequest)2 Cluster (org.apache.kafka.common.Cluster)2 MetricName (org.apache.kafka.common.MetricName)2 Node (org.apache.kafka.common.Node)2 TopicPartition (org.apache.kafka.common.TopicPartition)2 ProduceResponseData (org.apache.kafka.common.message.ProduceResponseData)2 Sensor (org.apache.kafka.common.metrics.Sensor)2 NetworkReceive (org.apache.kafka.common.network.NetworkReceive)2 IOException (java.io.IOException)1 ArrayList (java.util.ArrayList)1 Collections (java.util.Collections)1