Use of org.apache.kafka.common.record.MemoryRecords in project kafka by apache.
The class TypeTest, method testCompactRecordsSerde.
@Test
public void testCompactRecordsSerde() {
    MemoryRecords records = MemoryRecords.withRecords(CompressionType.NONE,
        new SimpleRecord("foo".getBytes()), new SimpleRecord("bar".getBytes()));
    ByteBuffer buffer = ByteBuffer.allocate(Type.COMPACT_RECORDS.sizeOf(records));
    Type.COMPACT_RECORDS.write(buffer, records);
    buffer.flip();
    assertEquals(records, Type.COMPACT_RECORDS.read(buffer));
}
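As a usage note, the MemoryRecords built with withRecords above can also be iterated record by record; a minimal sketch of our own (not part of the Kafka test), assuming the usual org.apache.kafka.common.record and java.nio.charset imports:

for (Record record : records.records()) {
    // value() exposes each payload as a ByteBuffer ("foo", then "bar")
    System.out.println(StandardCharsets.UTF_8.decode(record.value()));
}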
Use of org.apache.kafka.common.record.MemoryRecords in project kafka by apache.
The class KafkaRaftClient, method handleFetchSnapshotResponse.
private boolean handleFetchSnapshotResponse(RaftResponse.Inbound responseMetadata, long currentTimeMs) {
    FetchSnapshotResponseData data = (FetchSnapshotResponseData) responseMetadata.data;
    Errors topLevelError = Errors.forCode(data.errorCode());
    if (topLevelError != Errors.NONE) {
        return handleTopLevelError(topLevelError, responseMetadata);
    }
    // A well-formed response carries exactly one topic with exactly one partition; reject
    // anything else. Note || (not &&) so an empty topic list short-circuits before get(0).
    if (data.topics().size() != 1 || data.topics().get(0).partitions().size() != 1) {
        return false;
    }
    Optional<FetchSnapshotResponseData.PartitionSnapshot> partitionSnapshotOpt =
        FetchSnapshotResponse.forTopicPartition(data, log.topicPartition());
    if (!partitionSnapshotOpt.isPresent()) {
        return false;
    }
    FetchSnapshotResponseData.PartitionSnapshot partitionSnapshot = partitionSnapshotOpt.get();
    FetchSnapshotResponseData.LeaderIdAndEpoch currentLeaderIdAndEpoch = partitionSnapshot.currentLeader();
    OptionalInt responseLeaderId = optionalLeaderId(currentLeaderIdAndEpoch.leaderId());
    int responseEpoch = currentLeaderIdAndEpoch.leaderEpoch();
    Errors error = Errors.forCode(partitionSnapshot.errorCode());
    Optional<Boolean> handled = maybeHandleCommonResponse(error, responseLeaderId, responseEpoch, currentTimeMs);
    if (handled.isPresent()) {
        return handled.get();
    }
    FollowerState state = quorum.followerStateOrThrow();
    if (error == Errors.SNAPSHOT_NOT_FOUND
            || partitionSnapshot.snapshotId().endOffset() < 0
            || partitionSnapshot.snapshotId().epoch() < 0) {
        /* The leader deleted the snapshot before the follower could download it. Start over by
         * resetting the fetching snapshot state and sending another fetch request.
         */
        logger.trace("Leader doesn't know about snapshot id {}, returned error {} and snapshot id {}",
            state.fetchingSnapshot(), error, partitionSnapshot.snapshotId());
        state.setFetchingSnapshot(Optional.empty());
        state.resetFetchTimeout(currentTimeMs);
        return true;
    }
    OffsetAndEpoch snapshotId = new OffsetAndEpoch(
        partitionSnapshot.snapshotId().endOffset(),
        partitionSnapshot.snapshotId().epoch());
    RawSnapshotWriter snapshot;
    if (state.fetchingSnapshot().isPresent()) {
        snapshot = state.fetchingSnapshot().get();
    } else {
        throw new IllegalStateException(
            String.format("Received unexpected fetch snapshot response: %s", partitionSnapshot));
    }
    if (!snapshot.snapshotId().equals(snapshotId)) {
        throw new IllegalStateException(
            String.format("Received fetch snapshot response with an invalid id. Expected %s; Received %s",
                snapshot.snapshotId(), snapshotId));
    }
    if (snapshot.sizeInBytes() != partitionSnapshot.position()) {
        throw new IllegalStateException(
            String.format("Received fetch snapshot response with an invalid position. Expected %s; Received %s",
                snapshot.sizeInBytes(), partitionSnapshot.position()));
    }
    final UnalignedMemoryRecords records;
    if (partitionSnapshot.unalignedRecords() instanceof MemoryRecords) {
        records = new UnalignedMemoryRecords(((MemoryRecords) partitionSnapshot.unalignedRecords()).buffer());
    } else if (partitionSnapshot.unalignedRecords() instanceof UnalignedMemoryRecords) {
        records = (UnalignedMemoryRecords) partitionSnapshot.unalignedRecords();
    } else {
        throw new IllegalStateException(
            String.format("Received unexpected fetch snapshot response: %s", partitionSnapshot));
    }
    snapshot.append(records);
    if (snapshot.sizeInBytes() == partitionSnapshot.size()) {
        // Finished fetching the snapshot.
        snapshot.freeze();
        state.setFetchingSnapshot(Optional.empty());
        if (log.truncateToLatestSnapshot()) {
            updateFollowerHighWatermark(state, OptionalLong.of(log.highWatermark().offset));
        } else {
            throw new IllegalStateException(
                String.format("Full log truncation expected but didn't happen. Snapshot of %s, log end offset %s, last fetched %s",
                    snapshot.snapshotId(), log.endOffset(), log.lastFetchedEpoch()));
        }
    }
    state.resetFetchTimeout(currentTimeMs);
    return true;
}
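The instanceof chain above normalizes whatever BaseRecords implementation the response decoder produced into UnalignedMemoryRecords before appending to the snapshot writer. Pulled out on its own, that step looks like the following sketch (the helper name toUnaligned is ours, not part of KafkaRaftClient):

// Hypothetical helper: normalize a FetchSnapshot payload into UnalignedMemoryRecords.
private static UnalignedMemoryRecords toUnaligned(BaseRecords records) {
    if (records instanceof MemoryRecords) {
        // Wrap the backing buffer; a snapshot chunk need not start on a batch boundary.
        return new UnalignedMemoryRecords(((MemoryRecords) records).buffer());
    } else if (records instanceof UnalignedMemoryRecords) {
        return (UnalignedMemoryRecords) records;
    }
    throw new IllegalStateException("Unexpected records type: " + records.getClass());
}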
Use of org.apache.kafka.common.record.MemoryRecords in project kafka by apache.
The class RecordsIterator, method createMemoryRecords.
private MemoryRecords createMemoryRecords(FileRecords fileRecords) {
    final ByteBuffer buffer;
    if (allocatedBuffer.isPresent()) {
        buffer = allocatedBuffer.get();
        buffer.compact();
    } else {
        buffer = bufferSupplier.get(Math.min(batchSize, records.sizeInBytes()));
        allocatedBuffer = Optional.of(buffer);
    }
    MemoryRecords memoryRecords = readFileRecords(fileRecords, buffer);
    // firstBatchSize() is always non-null because the minimum buffer is HEADER_SIZE_UP_TO_MAGIC.
    if (memoryRecords.firstBatchSize() <= buffer.remaining()) {
        return memoryRecords;
    } else {
        // Not enough bytes read; create a bigger buffer
        ByteBuffer newBuffer = bufferSupplier.get(memoryRecords.firstBatchSize());
        allocatedBuffer = Optional.of(newBuffer);
        newBuffer.put(buffer);
        bufferSupplier.release(buffer);
        return readFileRecords(fileRecords, newBuffer);
    }
}
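This is a grow-and-retry read: start with a buffer of at most batchSize, and if the first record batch turns out to be larger, reallocate to exactly firstBatchSize() (keeping the bytes already read) and read again. The snippet leans on the private helper readFileRecords; a plausible sketch of it, under the assumption that it is a thin wrapper over FileRecords.readInto and MemoryRecords.readableRecords (the real RecordsIterator version also tracks its read position):

// Assumed shape of the helper, not the actual RecordsIterator implementation.
private MemoryRecords readFileRecords(FileRecords fileRecords, ByteBuffer buffer) throws IOException {
    fileRecords.readInto(buffer, 0);                       // copies bytes in and flips the buffer
    return MemoryRecords.readableRecords(buffer.slice());  // zero-copy view over the filled region
}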
Use of org.apache.kafka.common.record.MemoryRecords in project kafka by apache.
The class BatchAccumulator, method completeCurrentBatch.
private void completeCurrentBatch() {
    MemoryRecords data = currentBatch.build();
    completed.add(new CompletedBatch<>(
        currentBatch.baseOffset(),
        currentBatch.records(),
        data,
        memoryPool,
        currentBatch.initialBuffer()));
    currentBatch = null;
}
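Here currentBatch.build() drains the in-progress batch into an immutable MemoryRecords; the CompletedBatch keeps memoryPool and initialBuffer alongside it so the underlying buffer can be returned to the pool once the batch is sent. A minimal standalone sketch of that build step (ours, with illustrative buffer size and payload) using the public MemoryRecords.builder API, as with the CompressionType-based overloads used elsewhere on this page:

ByteBuffer buffer = ByteBuffer.allocate(1024);
MemoryRecordsBuilder builder = MemoryRecords.builder(
    buffer, CompressionType.NONE, TimestampType.CREATE_TIME, 0L /* base offset */);
builder.append(System.currentTimeMillis(), null /* key */, "value".getBytes());
MemoryRecords built = builder.build();  // immutable view; the buffer itself can be recycled later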
Use of org.apache.kafka.common.record.MemoryRecords in project kafka by apache.
The class Sender, method sendProduceRequest.
/**
 * Create a produce request from the given record batches
 */
private void sendProduceRequest(long now, int destination, short acks, int timeout, List<ProducerBatch> batches) {
    if (batches.isEmpty())
        return;
    final Map<TopicPartition, ProducerBatch> recordsByPartition = new HashMap<>(batches.size());
    // find the minimum magic version used when creating the record sets
    byte minUsedMagic = apiVersions.maxUsableProduceMagic();
    for (ProducerBatch batch : batches) {
        if (batch.magic() < minUsedMagic)
            minUsedMagic = batch.magic();
    }
    ProduceRequestData.TopicProduceDataCollection tpd = new ProduceRequestData.TopicProduceDataCollection();
    for (ProducerBatch batch : batches) {
        TopicPartition tp = batch.topicPartition;
        MemoryRecords records = batch.records();
        // Down-convert if necessary to the minimum magic used. For example, if a partition
        // migrates from a broker which supports the new magic version to one which doesn't,
        // then we will need to convert.
        if (!records.hasMatchingMagic(minUsedMagic))
            records = batch.records().downConvert(minUsedMagic, 0, time).records();
        ProduceRequestData.TopicProduceData tpData = tpd.find(tp.topic());
        if (tpData == null) {
            tpData = new ProduceRequestData.TopicProduceData().setName(tp.topic());
            tpd.add(tpData);
        }
        tpData.partitionData().add(new ProduceRequestData.PartitionProduceData()
            .setIndex(tp.partition())
            .setRecords(records));
        recordsByPartition.put(tp, batch);
    }
    String transactionalId = null;
    if (transactionManager != null && transactionManager.isTransactional()) {
        transactionalId = transactionManager.transactionalId();
    }
    ProduceRequest.Builder requestBuilder = ProduceRequest.forMagic(minUsedMagic,
        new ProduceRequestData()
            .setAcks(acks)
            .setTimeoutMs(timeout)
            .setTransactionalId(transactionalId)
            .setTopicData(tpd));
    RequestCompletionHandler callback = response ->
        handleProduceResponse(response, recordsByPartition, time.milliseconds());
    String nodeId = Integer.toString(destination);
    ClientRequest clientRequest = client.newClientRequest(nodeId, requestBuilder, now,
        acks != 0, requestTimeoutMs, callback);
    client.send(clientRequest, now);
    log.trace("Sent produce request to {}: {}", nodeId, requestBuilder);
}
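To make the request assembly above concrete, here is a hedged standalone sketch (ours, not Sender's) of a minimal ProduceRequestData for a single partition; the topic name, partition index, and payload are illustrative:

ProduceRequestData.TopicProduceDataCollection tpd = new ProduceRequestData.TopicProduceDataCollection();
ProduceRequestData.TopicProduceData tpData = new ProduceRequestData.TopicProduceData().setName("demo-topic");
tpData.partitionData().add(new ProduceRequestData.PartitionProduceData()
    .setIndex(0)
    .setRecords(MemoryRecords.withRecords(CompressionType.NONE, new SimpleRecord("v".getBytes()))));
tpd.add(tpData);
ProduceRequestData requestData = new ProduceRequestData()
    .setAcks((short) -1)   // -1 = wait for all in-sync replicas
    .setTimeoutMs(30_000)
    .setTopicData(tpd);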