
Example 86 with MemoryRecords

use of org.apache.kafka.common.record.MemoryRecords in project kafka by apache.

the class TypeTest method testCompactRecordsSerde.

@Test
public void testCompactRecordsSerde() {
    MemoryRecords records = MemoryRecords.withRecords(CompressionType.NONE, new SimpleRecord("foo".getBytes()), new SimpleRecord("bar".getBytes()));
    ByteBuffer buffer = ByteBuffer.allocate(Type.COMPACT_RECORDS.sizeOf(records));
    Type.COMPACT_RECORDS.write(buffer, records);
    buffer.flip();
    assertEquals(records, Type.COMPACT_RECORDS.read(buffer));
}
Also used : SimpleRecord(org.apache.kafka.common.record.SimpleRecord) ByteBuffer(java.nio.ByteBuffer) MemoryRecords(org.apache.kafka.common.record.MemoryRecords) Test(org.junit.jupiter.api.Test)
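
A minimal standalone sketch (not part of the Kafka sources; the class name is illustrative) of what the round trip above yields: the deserialized MemoryRecords can be iterated and each record's payload decoded.

import java.nio.ByteBuffer;
import java.nio.charset.StandardCharsets;
import org.apache.kafka.common.record.CompressionType;
import org.apache.kafka.common.record.MemoryRecords;
import org.apache.kafka.common.record.Record;
import org.apache.kafka.common.record.SimpleRecord;

public class RecordsRoundTripSketch {
    public static void main(String[] args) {
        MemoryRecords records = MemoryRecords.withRecords(CompressionType.NONE,
            new SimpleRecord("foo".getBytes(StandardCharsets.UTF_8)),
            new SimpleRecord("bar".getBytes(StandardCharsets.UTF_8)));
        // Iterate the individual records and decode each value buffer.
        for (Record record : records.records()) {
            ByteBuffer value = record.value();
            byte[] bytes = new byte[value.remaining()];
            value.get(bytes);
            System.out.println(new String(bytes, StandardCharsets.UTF_8)); // "foo", then "bar"
        }
    }
}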

Example 87 with MemoryRecords

use of org.apache.kafka.common.record.MemoryRecords in project kafka by apache.

the class KafkaRaftClient method handleFetchSnapshotResponse.

private boolean handleFetchSnapshotResponse(RaftResponse.Inbound responseMetadata, long currentTimeMs) {
    FetchSnapshotResponseData data = (FetchSnapshotResponseData) responseMetadata.data;
    Errors topLevelError = Errors.forCode(data.errorCode());
    if (topLevelError != Errors.NONE) {
        return handleTopLevelError(topLevelError, responseMetadata);
    }
    if (data.topics().size() != 1 || data.topics().get(0).partitions().size() != 1) {
        return false;
    }
    Optional<FetchSnapshotResponseData.PartitionSnapshot> partitionSnapshotOpt = FetchSnapshotResponse.forTopicPartition(data, log.topicPartition());
    if (!partitionSnapshotOpt.isPresent()) {
        return false;
    }
    FetchSnapshotResponseData.PartitionSnapshot partitionSnapshot = partitionSnapshotOpt.get();
    FetchSnapshotResponseData.LeaderIdAndEpoch currentLeaderIdAndEpoch = partitionSnapshot.currentLeader();
    OptionalInt responseLeaderId = optionalLeaderId(currentLeaderIdAndEpoch.leaderId());
    int responseEpoch = currentLeaderIdAndEpoch.leaderEpoch();
    Errors error = Errors.forCode(partitionSnapshot.errorCode());
    Optional<Boolean> handled = maybeHandleCommonResponse(error, responseLeaderId, responseEpoch, currentTimeMs);
    if (handled.isPresent()) {
        return handled.get();
    }
    FollowerState state = quorum.followerStateOrThrow();
    if (error == Errors.SNAPSHOT_NOT_FOUND || partitionSnapshot.snapshotId().endOffset() < 0 || partitionSnapshot.snapshotId().epoch() < 0) {
        /* The leader deleted the snapshot before the follower could download it. Start over by
         * resetting the fetching snapshot state and sending another fetch request.
         */
        logger.trace("Leader doesn't know about snapshot id {}, returned error {} and snapshot id {}", state.fetchingSnapshot(), partitionSnapshot.errorCode(), partitionSnapshot.snapshotId());
        state.setFetchingSnapshot(Optional.empty());
        state.resetFetchTimeout(currentTimeMs);
        return true;
    }
    OffsetAndEpoch snapshotId = new OffsetAndEpoch(partitionSnapshot.snapshotId().endOffset(), partitionSnapshot.snapshotId().epoch());
    RawSnapshotWriter snapshot;
    if (state.fetchingSnapshot().isPresent()) {
        snapshot = state.fetchingSnapshot().get();
    } else {
        throw new IllegalStateException(String.format("Received unexpected fetch snapshot response: %s", partitionSnapshot));
    }
    if (!snapshot.snapshotId().equals(snapshotId)) {
        throw new IllegalStateException(String.format("Received fetch snapshot response with an invalid id. Expected %s; Received %s", snapshot.snapshotId(), snapshotId));
    }
    if (snapshot.sizeInBytes() != partitionSnapshot.position()) {
        throw new IllegalStateException(String.format("Received fetch snapshot response with an invalid position. Expected %s; Received %s", snapshot.sizeInBytes(), partitionSnapshot.position()));
    }
    final UnalignedMemoryRecords records;
    if (partitionSnapshot.unalignedRecords() instanceof MemoryRecords) {
        records = new UnalignedMemoryRecords(((MemoryRecords) partitionSnapshot.unalignedRecords()).buffer());
    } else if (partitionSnapshot.unalignedRecords() instanceof UnalignedMemoryRecords) {
        records = (UnalignedMemoryRecords) partitionSnapshot.unalignedRecords();
    } else {
        throw new IllegalStateException(String.format("Received unexpected fetch snapshot response: %s", partitionSnapshot));
    }
    snapshot.append(records);
    if (snapshot.sizeInBytes() == partitionSnapshot.size()) {
        // Finished fetching the snapshot.
        snapshot.freeze();
        state.setFetchingSnapshot(Optional.empty());
        if (log.truncateToLatestSnapshot()) {
            updateFollowerHighWatermark(state, OptionalLong.of(log.highWatermark().offset));
        } else {
            throw new IllegalStateException(String.format("Full log truncation expected but didn't happen. Snapshot of %s, log end offset %s, last fetched %s", snapshot.snapshotId(), log.endOffset(), log.lastFetchedEpoch()));
        }
    }
    state.resetFetchTimeout(currentTimeMs);
    return true;
}
Also used : FetchSnapshotResponseData(org.apache.kafka.common.message.FetchSnapshotResponseData) OptionalInt(java.util.OptionalInt) Errors(org.apache.kafka.common.protocol.Errors) RawSnapshotWriter(org.apache.kafka.snapshot.RawSnapshotWriter) UnalignedMemoryRecords(org.apache.kafka.common.record.UnalignedMemoryRecords) MemoryRecords(org.apache.kafka.common.record.MemoryRecords)
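
The instanceof branches above either reuse an UnalignedMemoryRecords as-is or wrap the backing buffer of an aligned MemoryRecords. A hedged standalone sketch (class name is illustrative) of that wrapping step:

import org.apache.kafka.common.record.CompressionType;
import org.apache.kafka.common.record.MemoryRecords;
import org.apache.kafka.common.record.SimpleRecord;
import org.apache.kafka.common.record.UnalignedMemoryRecords;

public class WrapUnalignedSketch {
    public static void main(String[] args) {
        MemoryRecords aligned = MemoryRecords.withRecords(CompressionType.NONE,
            new SimpleRecord("chunk".getBytes()));
        // UnalignedMemoryRecords carries raw snapshot bytes that need not start on a
        // record-batch boundary, which is why snapshot transfer uses it for partial chunks.
        UnalignedMemoryRecords unaligned = new UnalignedMemoryRecords(aligned.buffer());
        System.out.println(unaligned.sizeInBytes() == aligned.sizeInBytes()); // true
    }
}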

Example 88 with MemoryRecords

use of org.apache.kafka.common.record.MemoryRecords in project kafka by apache.

the class RecordsIterator method createMemoryRecords.

private MemoryRecords createMemoryRecords(FileRecords fileRecords) {
    final ByteBuffer buffer;
    if (allocatedBuffer.isPresent()) {
        buffer = allocatedBuffer.get();
        buffer.compact();
    } else {
        buffer = bufferSupplier.get(Math.min(batchSize, records.sizeInBytes()));
        allocatedBuffer = Optional.of(buffer);
    }
    MemoryRecords memoryRecords = readFileRecords(fileRecords, buffer);
    // firstBatchSize() is always non-null because the minimum buffer is HEADER_SIZE_UP_TO_MAGIC.
    if (memoryRecords.firstBatchSize() <= buffer.remaining()) {
        return memoryRecords;
    } else {
        // Not enough bytes read; create a bigger buffer
        ByteBuffer newBuffer = bufferSupplier.get(memoryRecords.firstBatchSize());
        allocatedBuffer = Optional.of(newBuffer);
        newBuffer.put(buffer);
        bufferSupplier.release(buffer);
        return readFileRecords(fileRecords, newBuffer);
    }
}
Also used : ByteBuffer(java.nio.ByteBuffer) MemoryRecords(org.apache.kafka.common.record.MemoryRecords)
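
The reuse branch above relies on ByteBuffer.compact() preserving bytes that were read into the buffer but not yet consumed. A small self-contained sketch (plain java.nio, no Kafka types) of that behavior:

import java.nio.ByteBuffer;
import java.nio.charset.StandardCharsets;

public class CompactSketch {
    public static void main(String[] args) {
        ByteBuffer buffer = ByteBuffer.allocate(16);
        buffer.put("abcdef".getBytes(StandardCharsets.UTF_8));
        buffer.flip();    // switch to read mode: position 0, limit 6
        buffer.get();     // consume 'a'
        buffer.get();     // consume 'b'
        buffer.compact(); // copy "cdef" to index 0 and leave the buffer in write mode
        System.out.println(buffer.position()); // 4: new writes land after the carried-over bytes
    }
}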

Example 89 with MemoryRecords

use of org.apache.kafka.common.record.MemoryRecords in project kafka by apache.

the class BatchAccumulator method completeCurrentBatch.

private void completeCurrentBatch() {
    MemoryRecords data = currentBatch.build();
    completed.add(new CompletedBatch<>(currentBatch.baseOffset(), currentBatch.records(), data, memoryPool, currentBatch.initialBuffer()));
    currentBatch = null;
}
Also used : MemoryRecords(org.apache.kafka.common.record.MemoryRecords)
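
Here currentBatch.build() turns an in-flight batch into MemoryRecords. For orientation, a hedged sketch of the analogous public MemoryRecordsBuilder API (buffer size and values are illustrative):

import java.nio.ByteBuffer;
import org.apache.kafka.common.record.CompressionType;
import org.apache.kafka.common.record.MemoryRecords;
import org.apache.kafka.common.record.MemoryRecordsBuilder;
import org.apache.kafka.common.record.RecordBatch;
import org.apache.kafka.common.record.TimestampType;

public class BuildRecordsSketch {
    public static void main(String[] args) {
        ByteBuffer buffer = ByteBuffer.allocate(1024);
        MemoryRecordsBuilder builder = MemoryRecords.builder(buffer,
            RecordBatch.CURRENT_MAGIC_VALUE, CompressionType.NONE,
            TimestampType.CREATE_TIME, 0L /* base offset */);
        builder.append(System.currentTimeMillis(), "key".getBytes(), "value".getBytes());
        MemoryRecords data = builder.build(); // closes the batch and returns the built records
        System.out.println(data.sizeInBytes());
    }
}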

Example 90 with MemoryRecords

use of org.apache.kafka.common.record.MemoryRecords in project kafka by apache.

the class Sender method sendProduceRequest.

/**
 * Create a produce request from the given record batches
 */
private void sendProduceRequest(long now, int destination, short acks, int timeout, List<ProducerBatch> batches) {
    if (batches.isEmpty())
        return;
    final Map<TopicPartition, ProducerBatch> recordsByPartition = new HashMap<>(batches.size());
    // find the minimum magic version used when creating the record sets
    byte minUsedMagic = apiVersions.maxUsableProduceMagic();
    for (ProducerBatch batch : batches) {
        if (batch.magic() < minUsedMagic)
            minUsedMagic = batch.magic();
    }
    ProduceRequestData.TopicProduceDataCollection tpd = new ProduceRequestData.TopicProduceDataCollection();
    for (ProducerBatch batch : batches) {
        TopicPartition tp = batch.topicPartition;
        MemoryRecords records = batch.records();
        // Down-convert if necessary to the minimum magic used: for example, if a partition migrates
        // from a broker which supports the new magic version to one which doesn't, we will need to convert.
        if (!records.hasMatchingMagic(minUsedMagic))
            records = batch.records().downConvert(minUsedMagic, 0, time).records();
        ProduceRequestData.TopicProduceData tpData = tpd.find(tp.topic());
        if (tpData == null) {
            tpData = new ProduceRequestData.TopicProduceData().setName(tp.topic());
            tpd.add(tpData);
        }
        tpData.partitionData().add(new ProduceRequestData.PartitionProduceData().setIndex(tp.partition()).setRecords(records));
        recordsByPartition.put(tp, batch);
    }
    String transactionalId = null;
    if (transactionManager != null && transactionManager.isTransactional()) {
        transactionalId = transactionManager.transactionalId();
    }
    ProduceRequest.Builder requestBuilder = ProduceRequest.forMagic(minUsedMagic, new ProduceRequestData().setAcks(acks).setTimeoutMs(timeout).setTransactionalId(transactionalId).setTopicData(tpd));
    RequestCompletionHandler callback = response -> handleProduceResponse(response, recordsByPartition, time.milliseconds());
    String nodeId = Integer.toString(destination);
    ClientRequest clientRequest = client.newClientRequest(nodeId, requestBuilder, now, acks != 0, requestTimeoutMs, callback);
    client.send(clientRequest, now);
    log.trace("Sent produce request to {}: {}", nodeId, requestBuilder);
}
Also used : Max(org.apache.kafka.common.metrics.stats.Max) TransactionAbortedException(org.apache.kafka.common.errors.TransactionAbortedException) ProduceRequest(org.apache.kafka.common.requests.ProduceRequest) Metadata(org.apache.kafka.clients.Metadata) KafkaException(org.apache.kafka.common.KafkaException) HashMap(java.util.HashMap) RetriableException(org.apache.kafka.common.errors.RetriableException) AbstractRequest(org.apache.kafka.common.requests.AbstractRequest) ClusterAuthorizationException(org.apache.kafka.common.errors.ClusterAuthorizationException) Function(java.util.function.Function) ClientRequest(org.apache.kafka.clients.ClientRequest) InvalidRecordException(org.apache.kafka.common.InvalidRecordException) ArrayList(java.util.ArrayList) Cluster(org.apache.kafka.common.Cluster) RequestHeader(org.apache.kafka.common.requests.RequestHeader) FindCoordinatorRequest(org.apache.kafka.common.requests.FindCoordinatorRequest) InvalidMetadataException(org.apache.kafka.common.errors.InvalidMetadataException) KafkaClient(org.apache.kafka.clients.KafkaClient) RecordBatch(org.apache.kafka.common.record.RecordBatch) LogContext(org.apache.kafka.common.utils.LogContext) Map(java.util.Map) MetricName(org.apache.kafka.common.MetricName) ProduceRequestData(org.apache.kafka.common.message.ProduceRequestData) ProduceResponse(org.apache.kafka.common.requests.ProduceResponse) TopicPartition(org.apache.kafka.common.TopicPartition) Sensor(org.apache.kafka.common.metrics.Sensor) TimeoutException(org.apache.kafka.common.errors.TimeoutException) Logger(org.slf4j.Logger) Time(org.apache.kafka.common.utils.Time) Iterator(java.util.Iterator) IOException(java.io.IOException) ApiVersions(org.apache.kafka.clients.ApiVersions) Collectors(java.util.stream.Collectors) Objects(java.util.Objects) MemoryRecords(org.apache.kafka.common.record.MemoryRecords) List(java.util.List) NetworkClientUtils(org.apache.kafka.clients.NetworkClientUtils) RequestCompletionHandler(org.apache.kafka.clients.RequestCompletionHandler) Avg(org.apache.kafka.common.metrics.stats.Avg) TopicAuthorizationException(org.apache.kafka.common.errors.TopicAuthorizationException) Errors(org.apache.kafka.common.protocol.Errors) Node(org.apache.kafka.common.Node) UnknownTopicOrPartitionException(org.apache.kafka.common.errors.UnknownTopicOrPartitionException) Meter(org.apache.kafka.common.metrics.stats.Meter) Collections(java.util.Collections) ClientResponse(org.apache.kafka.clients.ClientResponse) AuthenticationException(org.apache.kafka.common.errors.AuthenticationException)
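
The down-conversion guard above hinges on hasMatchingMagic(). A minimal sketch (not from Sender; the class name is illustrative) showing how a batch written with an older magic fails that check:

import org.apache.kafka.common.record.CompressionType;
import org.apache.kafka.common.record.MemoryRecords;
import org.apache.kafka.common.record.RecordBatch;
import org.apache.kafka.common.record.SimpleRecord;

public class MagicCheckSketch {
    public static void main(String[] args) {
        MemoryRecords v1 = MemoryRecords.withRecords(RecordBatch.MAGIC_VALUE_V1,
            CompressionType.NONE, new SimpleRecord("k".getBytes(), "v".getBytes()));
        // A v1 batch does not match magic v2, so the Sender would down-convert it first.
        System.out.println(v1.hasMatchingMagic(RecordBatch.MAGIC_VALUE_V2)); // false
    }
}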

Aggregations

MemoryRecords (org.apache.kafka.common.record.MemoryRecords) 108
TopicPartition (org.apache.kafka.common.TopicPartition) 59
Test (org.junit.jupiter.api.Test) 43
SimpleRecord (org.apache.kafka.common.record.SimpleRecord) 40
ByteBuffer (java.nio.ByteBuffer) 34
ArrayList (java.util.ArrayList) 28
List (java.util.List) 27
Test (org.junit.Test) 27
HashMap (java.util.HashMap) 26
LinkedHashMap (java.util.LinkedHashMap) 23
MemoryRecordsBuilder (org.apache.kafka.common.record.MemoryRecordsBuilder) 23
ConsumerRecord (org.apache.kafka.clients.consumer.ConsumerRecord) 18
FetchResponseData (org.apache.kafka.common.message.FetchResponseData) 16
Collections.singletonList (java.util.Collections.singletonList) 15
Record (org.apache.kafka.common.record.Record) 15
Arrays.asList (java.util.Arrays.asList) 14
Collections.emptyList (java.util.Collections.emptyList) 14
ByteArrayDeserializer (org.apache.kafka.common.serialization.ByteArrayDeserializer) 14
Metrics (org.apache.kafka.common.metrics.Metrics) 12
MutableRecordBatch (org.apache.kafka.common.record.MutableRecordBatch) 11