Usage example of org.apache.kafka.server.common.ApiMessageAndVersion in the Apache Kafka project:
the scheduleAppend method of the LocalLogManager class.
// Test-support append: splits the batch in two and appends each record
// individually via scheduleAtomicAppend, optionally emulating a mid-batch
// loss of leadership controlled by the resignAfterNonAtomicCommit flag.
@Override
public long scheduleAppend(int epoch, List<ApiMessageAndVersion> batch) {
// Reject empty batches up front — the split and offset arithmetic below
// assume at least one record.
if (batch.isEmpty()) {
throw new IllegalArgumentException("Batch cannot be empty");
}
// Split the batch into two halves; for a single-record batch the first
// half is empty and the second holds the lone record.
List<ApiMessageAndVersion> first = batch.subList(0, batch.size() / 2);
List<ApiMessageAndVersion> second = batch.subList(batch.size() / 2, batch.size());
// Sanity checks: the two halves cover the whole batch, and the second
// half is never empty (size/2 rounds down).
assertEquals(batch.size(), first.size() + second.size());
assertFalse(second.isEmpty());
// Append the first half one record at a time; the mapToLong side effect
// performs the appends in order, and max() yields the last assigned
// offset — or an empty OptionalLong when the first half was empty.
OptionalLong firstOffset = first.stream().mapToLong(record -> scheduleAtomicAppend(epoch, Collections.singletonList(record))).max();
// Note the short circuit: getAndSet(false) only fires when the first half
// actually wrote something, so the resign flag is consumed at most once
// and only on a batch that was partially appended.
if (firstOffset.isPresent() && resignAfterNonAtomicCommit.getAndSet(false)) {
// Emulate losing leadership in the middle of a non-atomic append by not writing
// the rest of the batch and instead writing a leader change message
resign(leader.epoch());
// Report the offset the full batch would have ended at, even though the
// second half was never written.
return firstOffset.getAsLong() + second.size();
} else {
// Normal path: append the second half as well and return the offset of
// the last record. second is non-empty (asserted above), so the bare
// getAsLong() cannot throw.
return second.stream().mapToLong(record -> scheduleAtomicAppend(epoch, Collections.singletonList(record))).max().getAsLong();
}
}
Usage example of org.apache.kafka.server.common.ApiMessageAndVersion in the Apache Kafka project:
the testRemoteLogSegmentMetadataUpdateTransform method of the RemoteLogMetadataTransformTest class.
// Verifies that a RemoteLogSegmentMetadataUpdate survives a round trip
// through its serialized ApiMessageAndVersion form unchanged.
@Test
public void testRemoteLogSegmentMetadataUpdateTransform() {
    RemoteLogSegmentMetadataUpdateTransform transform = new RemoteLogSegmentMetadataUpdateTransform();
    RemoteLogSegmentId segmentId = new RemoteLogSegmentId(TP0, Uuid.randomUuid());
    RemoteLogSegmentMetadataUpdate original = new RemoteLogSegmentMetadataUpdate(
        segmentId, time.milliseconds(), RemoteLogSegmentState.COPY_SEGMENT_FINISHED, 1);
    // Serialize to the record representation, then deserialize back.
    ApiMessageAndVersion serialized = transform.toApiMessageAndVersion(original);
    RemoteLogSegmentMetadataUpdate roundTripped = transform.fromApiMessageAndVersion(serialized);
    Assertions.assertEquals(original, roundTripped);
}
Usage example of org.apache.kafka.server.common.ApiMessageAndVersion in the Apache Kafka project:
the testRemoteLogSegmentMetadataTransform method of the RemoteLogMetadataTransformTest class.
// Round-trip check: converting RemoteLogSegmentMetadata to an
// ApiMessageAndVersion record and back must yield an equal object.
@Test
public void testRemoteLogSegmentMetadataTransform() {
    RemoteLogSegmentMetadataTransform transform = new RemoteLogSegmentMetadataTransform();
    RemoteLogSegmentMetadata original = createRemoteLogSegmentMetadata();
    // Serialize and immediately deserialize the metadata.
    RemoteLogSegmentMetadata roundTripped =
        transform.fromApiMessageAndVersion(transform.toApiMessageAndVersion(original));
    Assertions.assertEquals(original, roundTripped);
}
Usage example of org.apache.kafka.server.common.ApiMessageAndVersion in the Apache Kafka project:
the testRemoteLogPartitionMetadataTransform method of the RemoteLogMetadataTransformTest class.
// Verifies that RemotePartitionDeleteMetadata round-trips through its
// ApiMessageAndVersion serialization without loss.
@Test
public void testRemoteLogPartitionMetadataTransform() {
    RemotePartitionDeleteMetadataTransform deleteTransform = new RemotePartitionDeleteMetadataTransform();
    RemotePartitionDeleteMetadata original = new RemotePartitionDeleteMetadata(
        TP0, RemotePartitionDeleteState.DELETE_PARTITION_STARTED, time.milliseconds(), 1);
    // Serialize to the record form, then parse it back.
    ApiMessageAndVersion serialized = deleteTransform.toApiMessageAndVersion(original);
    RemotePartitionDeleteMetadata roundTripped = deleteTransform.fromApiMessageAndVersion(serialized);
    Assertions.assertEquals(original, roundTripped);
}
Usage example of org.apache.kafka.server.common.ApiMessageAndVersion in the Apache Kafka project:
the testSnapshotConfiguration method of the QuorumControllerTest class.
// Exercises snapshot generation in the quorum controller: registers a set of
// brokers, heartbeats all but the last, creates a topic and allocates producer
// ids, then verifies the content of the resulting snapshot.
@Test
public void testSnapshotConfiguration() throws Throwable {
    final int brokerCount = 4;
    // Tiny byte threshold so a snapshot is triggered almost immediately.
    final int snapshotMaxNewRecordBytes = 4;
    Map<Integer, Long> registeredEpochs = new HashMap<>();
    Uuid fooTopicId;
    try (LocalLogManagerTestEnv logEnv = new LocalLogManagerTestEnv(3, Optional.empty())) {
        try (QuorumControllerTestEnv controlEnv = new QuorumControllerTestEnv(logEnv, b -> {
            b.setConfigDefs(CONFIGS).setSnapshotMaxNewRecordBytes(snapshotMaxNewRecordBytes);
        })) {
            QuorumController activeController = controlEnv.activeController();
            // Register every broker and remember the epoch each registration returned.
            for (int brokerId = 0; brokerId < brokerCount; brokerId++) {
                Listener plaintext = new Listener()
                    .setName("PLAINTEXT")
                    .setHost("localhost")
                    .setPort(9092 + brokerId);
                BrokerRegistrationRequestData registration = new BrokerRegistrationRequestData()
                    .setBrokerId(brokerId)
                    .setRack(null)
                    .setClusterId(activeController.clusterId())
                    .setIncarnationId(Uuid.fromString("kxAT73dKQsitIedpiPtwB" + brokerId))
                    .setListeners(new ListenerCollection(Arrays.asList(plaintext).iterator()));
                BrokerRegistrationReply reply = activeController.registerBroker(registration).get();
                registeredEpochs.put(brokerId, reply.epoch());
            }
            // Heartbeat all brokers except the last one.
            for (int brokerId = 0; brokerId < brokerCount - 1; brokerId++) {
                BrokerHeartbeatRequestData heartbeat = new BrokerHeartbeatRequestData()
                    .setWantFence(false)
                    .setBrokerEpoch(registeredEpochs.get(brokerId))
                    .setBrokerId(brokerId)
                    .setCurrentMetadataOffset(100000L);
                assertEquals(new BrokerHeartbeatReply(true, false, false, false),
                    activeController.processBrokerHeartbeat(heartbeat).get());
            }
            // Create topic "foo" with two explicitly assigned partitions.
            CreatableReplicaAssignmentCollection assignments = new CreatableReplicaAssignmentCollection(
                Arrays.asList(
                    new CreatableReplicaAssignment().setPartitionIndex(0).setBrokerIds(Arrays.asList(0, 1, 2)),
                    new CreatableReplicaAssignment().setPartitionIndex(1).setBrokerIds(Arrays.asList(1, 2, 0))
                ).iterator());
            CreatableTopic fooTopic = new CreatableTopic()
                .setName("foo")
                .setNumPartitions(-1)
                .setReplicationFactor((short) -1)
                .setAssignments(assignments);
            CreateTopicsRequestData createRequest = new CreateTopicsRequestData()
                .setTopics(new CreatableTopicCollection(Collections.singleton(fooTopic).iterator()));
            CreateTopicsResponseData createResponse = activeController.createTopics(createRequest).get();
            fooTopicId = createResponse.topics().find("foo").topicId();
            // Allocate producer ids on broker 0 to add more records to the log.
            activeController.allocateProducerIds(new AllocateProducerIdsRequestData()
                .setBrokerId(0)
                .setBrokerEpoch(registeredEpochs.get(0))).get();
            // Wait for the snapshot produced by the record-byte threshold and
            // verify it contains the expected metadata.
            SnapshotReader<ApiMessageAndVersion> snapshot =
                createSnapshotReader(logEnv.waitForLatestSnapshot());
            checkSnapshotSubcontent(expectedSnapshotContent(fooTopicId, registeredEpochs), snapshot);
        }
    }
}
Aggregations