use of org.apache.kafka.server.common.ApiMessageAndVersion in project kafka by apache.
the class QuorumControllerTest method checkSnapshotSubcontent.
/**
 * This function checks that the records in the iterator are a subset of the expected list.
 *
 * This is needed because, when generating snapshots through configuration, it is difficult to control exactly
 * when a snapshot will be generated and which committed offset will be included in the snapshot.
 */
private void checkSnapshotSubcontent(
    List<ApiMessageAndVersion> expected,
    Iterator<Batch<ApiMessageAndVersion>> iterator
) throws Exception {
    RecordTestUtils.deepSortRecords(expected);

    List<ApiMessageAndVersion> actual = StreamSupport
        .stream(Spliterators.spliteratorUnknownSize(iterator, Spliterator.ORDERED), false)
        .flatMap(batch -> batch.records().stream())
        .collect(Collectors.toList());
    RecordTestUtils.deepSortRecords(actual);

    int expectedIndex = 0;
    for (ApiMessageAndVersion current : actual) {
        while (expectedIndex < expected.size() && !expected.get(expectedIndex).equals(current)) {
            expectedIndex += 1;
        }
        expectedIndex += 1;
    }

    assertTrue(
        expectedIndex <= expected.size(),
        String.format("actual is not a subset of expected: expected = %s; actual = %s", expected, actual)
    );
}
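The loop above advances a single forward cursor through the sorted expected list, so the check passes only when every sorted actual record can be matched against expected in order. A minimal standalone sketch of the same idea on plain lists (the class name, helper name, and sample data below are illustrative, not part of the Kafka test):

import java.util.Arrays;
import java.util.List;

public class OrderedSubsetExample {
    // Returns true if every element of actual appears in expected, in the same relative order.
    static <T> boolean isOrderedSubset(List<T> expected, List<T> actual) {
        int expectedIndex = 0;
        for (T current : actual) {
            while (expectedIndex < expected.size() && !expected.get(expectedIndex).equals(current)) {
                expectedIndex += 1;
            }
            expectedIndex += 1;
        }
        // A record that never matches pushes the cursor past expected.size().
        return expectedIndex <= expected.size();
    }

    public static void main(String[] args) {
        List<String> expected = Arrays.asList("a", "b", "c", "d");
        System.out.println(isOrderedSubset(expected, Arrays.asList("b", "d"))); // true
        System.out.println(isOrderedSubset(expected, Arrays.asList("d", "b"))); // false: order matters
    }
}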
use of org.apache.kafka.server.common.ApiMessageAndVersion in project kafka by apache.
the class ReplicationControlManagerTest method testCreateTopics.
@Test
public void testCreateTopics() throws Exception {
    ReplicationControlTestContext ctx = new ReplicationControlTestContext();
    ReplicationControlManager replicationControl = ctx.replicationControl;
    CreateTopicsRequestData request = new CreateTopicsRequestData();
    request.topics().add(new CreatableTopic().setName("foo").
        setNumPartitions(-1).setReplicationFactor((short) -1));
    ControllerResult<CreateTopicsResponseData> result = replicationControl.createTopics(request);
    CreateTopicsResponseData expectedResponse = new CreateTopicsResponseData();
    expectedResponse.topics().add(new CreatableTopicResult().setName("foo").
        setErrorCode(Errors.INVALID_REPLICATION_FACTOR.code()).
        setErrorMessage("Unable to replicate the partition 3 time(s): All " +
            "brokers are currently fenced."));
    assertEquals(expectedResponse, result.response());

    ctx.registerBrokers(0, 1, 2);
    ctx.unfenceBrokers(0, 1, 2);
    ControllerResult<CreateTopicsResponseData> result2 = replicationControl.createTopics(request);
    CreateTopicsResponseData expectedResponse2 = new CreateTopicsResponseData();
    expectedResponse2.topics().add(new CreatableTopicResult().setName("foo").
        setNumPartitions(1).setReplicationFactor((short) 3).
        setErrorMessage(null).setErrorCode((short) 0).
        setTopicId(result2.response().topics().find("foo").topicId()));
    assertEquals(expectedResponse2, result2.response());
    ctx.replay(result2.records());
    assertEquals(new PartitionRegistration(new int[] { 1, 2, 0 },
        new int[] { 1, 2, 0 }, Replicas.NONE, Replicas.NONE, 1, 0, 0),
        replicationControl.getPartition(
            ((TopicRecord) result2.records().get(0).message()).topicId(), 0));

    ControllerResult<CreateTopicsResponseData> result3 = replicationControl.createTopics(request);
    CreateTopicsResponseData expectedResponse3 = new CreateTopicsResponseData();
    expectedResponse3.topics().add(new CreatableTopicResult().setName("foo").
        setErrorCode(Errors.TOPIC_ALREADY_EXISTS.code()).
        setErrorMessage("Topic 'foo' already exists."));
    assertEquals(expectedResponse3, result3.response());

    Uuid fooId = result2.response().topics().find("foo").topicId();
    RecordTestUtils.assertBatchIteratorContains(asList(asList(
        new ApiMessageAndVersion(new PartitionRecord().setPartitionId(0).setTopicId(fooId).
            setReplicas(asList(1, 2, 0)).setIsr(asList(1, 2, 0)).
            setRemovingReplicas(Collections.emptyList()).setAddingReplicas(Collections.emptyList()).
            setLeader(1).setLeaderEpoch(0).setPartitionEpoch(0), (short) 0),
        new ApiMessageAndVersion(new TopicRecord().setTopicId(fooId).setName("foo"), (short) 0))),
        ctx.replicationControl.iterator(Long.MAX_VALUE));
}
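The final assertion above spells out the exact ApiMessageAndVersion batches that topic creation produced. A smaller, self-contained sketch of the same pairing of a metadata record with the schema version it should be serialized at (the record values are illustrative, and the message()/version() accessors are assumed here):

import org.apache.kafka.common.Uuid;
import org.apache.kafka.common.metadata.TopicRecord;
import org.apache.kafka.server.common.ApiMessageAndVersion;

public class ApiMessageAndVersionExample {
    public static void main(String[] args) {
        // Wrap a TopicRecord together with the version to use when serializing it.
        Uuid topicId = Uuid.randomUuid();
        ApiMessageAndVersion wrapped = new ApiMessageAndVersion(
            new TopicRecord().setTopicId(topicId).setName("foo"), (short) 0);
        System.out.println(wrapped.message() + " at version " + wrapped.version());
    }
}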
use of org.apache.kafka.server.common.ApiMessageAndVersion in project kafka by apache.
the class QuorumControllerTest method testSnapshotSaveAndLoad.
@Test
public void testSnapshotSaveAndLoad() throws Throwable {
    final int numBrokers = 4;
    Map<Integer, Long> brokerEpochs = new HashMap<>();
    RawSnapshotReader reader = null;
    Uuid fooId;
    try (LocalLogManagerTestEnv logEnv = new LocalLogManagerTestEnv(3, Optional.empty())) {
        try (QuorumControllerTestEnv controlEnv =
                new QuorumControllerTestEnv(logEnv, b -> b.setConfigDefs(CONFIGS))) {
            QuorumController active = controlEnv.activeController();
            for (int i = 0; i < numBrokers; i++) {
                BrokerRegistrationReply reply = active.registerBroker(
                    new BrokerRegistrationRequestData().setBrokerId(i).setRack(null).
                        setClusterId(active.clusterId()).
                        setIncarnationId(Uuid.fromString("kxAT73dKQsitIedpiPtwB" + i)).
                        setListeners(new ListenerCollection(Arrays.asList(new Listener().
                            setName("PLAINTEXT").setHost("localhost").
                            setPort(9092 + i)).iterator()))).get();
                brokerEpochs.put(i, reply.epoch());
            }
            for (int i = 0; i < numBrokers - 1; i++) {
                assertEquals(new BrokerHeartbeatReply(true, false, false, false),
                    active.processBrokerHeartbeat(new BrokerHeartbeatRequestData().
                        setWantFence(false).setBrokerEpoch(brokerEpochs.get(i)).
                        setBrokerId(i).setCurrentMetadataOffset(100000L)).get());
            }
            CreateTopicsResponseData fooData = active.createTopics(
                new CreateTopicsRequestData().setTopics(new CreatableTopicCollection(
                    Collections.singleton(new CreatableTopic().setName("foo").
                        setNumPartitions(-1).setReplicationFactor((short) -1).
                        setAssignments(new CreatableReplicaAssignmentCollection(Arrays.asList(
                            new CreatableReplicaAssignment().
                                setPartitionIndex(0).setBrokerIds(Arrays.asList(0, 1, 2)),
                            new CreatableReplicaAssignment().
                                setPartitionIndex(1).setBrokerIds(Arrays.asList(1, 2, 0))).
                                    iterator()))).iterator()))).get();
            fooId = fooData.topics().find("foo").topicId();
            active.allocateProducerIds(new AllocateProducerIdsRequestData().
                setBrokerId(0).setBrokerEpoch(brokerEpochs.get(0))).get();
            long snapshotLogOffset = active.beginWritingSnapshot().get();
            reader = logEnv.waitForSnapshot(snapshotLogOffset);
            SnapshotReader<ApiMessageAndVersion> snapshot = createSnapshotReader(reader);
            assertEquals(snapshotLogOffset, snapshot.lastContainedLogOffset());
            checkSnapshotContent(expectedSnapshotContent(fooId, brokerEpochs), snapshot);
        }
    }

    try (LocalLogManagerTestEnv logEnv = new LocalLogManagerTestEnv(3, Optional.of(reader))) {
        try (QuorumControllerTestEnv controlEnv =
                new QuorumControllerTestEnv(logEnv, b -> b.setConfigDefs(CONFIGS))) {
            QuorumController active = controlEnv.activeController();
            long snapshotLogOffset = active.beginWritingSnapshot().get();
            SnapshotReader<ApiMessageAndVersion> snapshot =
                createSnapshotReader(logEnv.waitForSnapshot(snapshotLogOffset));
            assertEquals(snapshotLogOffset, snapshot.lastContainedLogOffset());
            checkSnapshotContent(expectedSnapshotContent(fooId, brokerEpochs), snapshot);
        }
    }
}
use of org.apache.kafka.server.common.ApiMessageAndVersion in project kafka by apache.
the class ReplicationControlManagerTest method testOfflinePartitionAndReplicaImbalanceMetrics.
@Test
public void testOfflinePartitionAndReplicaImbalanceMetrics() throws Exception {
    ReplicationControlTestContext ctx = new ReplicationControlTestContext();
    ReplicationControlManager replicationControl = ctx.replicationControl;
    ctx.registerBrokers(0, 1, 2, 3);
    ctx.unfenceBrokers(0, 1, 2, 3);

    CreatableTopicResult foo = ctx.createTestTopic("foo",
        new int[][] { new int[] { 0, 2 }, new int[] { 0, 1 } });
    CreatableTopicResult zar = ctx.createTestTopic("zar",
        new int[][] { new int[] { 0, 1, 2 }, new int[] { 1, 2, 3 }, new int[] { 1, 2, 0 } });

    ControllerResult<Void> result = replicationControl.unregisterBroker(0);
    ctx.replay(result.records());
    // All partitions should still be online after unregistering broker 0.
    assertEquals(0, ctx.metrics.offlinePartitionCount());
    // Three partitions should be missing their preferred (first) replica, broker 0.
    assertEquals(3, ctx.metrics.preferredReplicaImbalanceCount());

    result = replicationControl.unregisterBroker(1);
    ctx.replay(result.records());
    // After unregistering broker 1, one partition for topic foo should go offline.
    assertEquals(1, ctx.metrics.offlinePartitionCount());
    // At this point all five partitions should be missing their preferred (first) replica.
    assertEquals(5, ctx.metrics.preferredReplicaImbalanceCount());

    result = replicationControl.unregisterBroker(2);
    ctx.replay(result.records());
    // After unregistering broker 2, the last partition for topic foo should go offline
    // and two partitions for topic zar should go offline.
    assertEquals(4, ctx.metrics.offlinePartitionCount());

    result = replicationControl.unregisterBroker(3);
    ctx.replay(result.records());
    // After unregistering broker 3, the last partition for topic zar should go offline.
    assertEquals(5, ctx.metrics.offlinePartitionCount());

    // Deleting topic foo should bring the offline partition count down to 3.
    ArrayList<ApiMessageAndVersion> records = new ArrayList<>();
    replicationControl.deleteTopic(foo.topicId(), records);
    ctx.replay(records);
    assertEquals(3, ctx.metrics.offlinePartitionCount());

    // Deleting topic zar should bring the offline partition count down to 0.
    records = new ArrayList<>();
    replicationControl.deleteTopic(zar.topicId(), records);
    ctx.replay(records);
    assertEquals(0, ctx.metrics.offlinePartitionCount());
}
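The comments in this test describe when partitions start counting as offline as brokers are unregistered. A simplified, self-contained illustration of that bookkeeping follows; the helper and its data are hypothetical and only approximate the metric by treating a partition as offline once none of its assigned replicas is live, whereas the real ReplicationControlManager tracks this through leader and ISR state:

import java.util.Arrays;
import java.util.HashSet;
import java.util.List;
import java.util.Set;

public class OfflinePartitionCountExample {
    // Counts partitions for which no assigned replica is a live broker (simplified view).
    static long countOfflinePartitions(List<int[]> assignments, Set<Integer> liveBrokers) {
        return assignments.stream()
            .filter(replicas -> Arrays.stream(replicas).boxed().noneMatch(liveBrokers::contains))
            .count();
    }

    public static void main(String[] args) {
        // Replica assignments mirroring the test: foo has 2 partitions, zar has 3.
        List<int[]> partitions = Arrays.asList(
            new int[] { 0, 2 }, new int[] { 0, 1 },                                    // foo-0, foo-1
            new int[] { 0, 1, 2 }, new int[] { 1, 2, 3 }, new int[] { 1, 2, 0 });      // zar-0..2

        Set<Integer> live = new HashSet<>(Arrays.asList(1, 2, 3)); // broker 0 unregistered
        System.out.println(countOfflinePartitions(partitions, live)); // 0

        live.remove(1); // broker 1 also unregistered
        System.out.println(countOfflinePartitions(partitions, live)); // 1 (foo-1)
    }
}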
use of org.apache.kafka.server.common.ApiMessageAndVersion in project kafka by apache.
the class ReplicationControlManagerTest method testElectPreferredLeaders.
@Test
public void testElectPreferredLeaders() throws Exception {
    ReplicationControlTestContext ctx = new ReplicationControlTestContext();
    ReplicationControlManager replication = ctx.replicationControl;
    ctx.registerBrokers(0, 1, 2, 3, 4);
    ctx.unfenceBrokers(2, 3, 4);
    Uuid fooId = ctx.createTestTopic("foo", new int[][] {
        new int[] { 1, 2, 3 }, new int[] { 2, 3, 4 }, new int[] { 0, 2, 1 } }).topicId();

    ElectLeadersRequestData request1 = new ElectLeadersRequestData().
        setElectionType(ElectionType.PREFERRED.value).
        setTopicPartitions(new TopicPartitionsCollection(asList(
            new TopicPartitions().setTopic("foo").setPartitions(asList(0, 1)),
            new TopicPartitions().setTopic("bar").setPartitions(asList(0, 1))).iterator()));
    ControllerResult<ElectLeadersResponseData> election1Result = replication.electLeaders(request1);
    ElectLeadersResponseData expectedResponse1 = buildElectLeadersResponse(NONE, false, Utils.mkMap(
        Utils.mkEntry(new TopicPartition("foo", 0), new ApiError(PREFERRED_LEADER_NOT_AVAILABLE)),
        Utils.mkEntry(new TopicPartition("foo", 1), new ApiError(ELECTION_NOT_NEEDED)),
        Utils.mkEntry(new TopicPartition("bar", 0),
            new ApiError(UNKNOWN_TOPIC_OR_PARTITION, "No such topic as bar")),
        Utils.mkEntry(new TopicPartition("bar", 1),
            new ApiError(UNKNOWN_TOPIC_OR_PARTITION, "No such topic as bar"))));
    assertElectLeadersResponse(expectedResponse1, election1Result.response());
    assertEquals(Collections.emptyList(), election1Result.records());

    ctx.unfenceBrokers(0, 1);
    ControllerResult<AlterIsrResponseData> alterIsrResult = replication.alterIsr(
        new AlterIsrRequestData().setBrokerId(2).setBrokerEpoch(102).
            setTopics(asList(new AlterIsrRequestData.TopicData().setName("foo").
                setPartitions(asList(new AlterIsrRequestData.PartitionData().
                    setPartitionIndex(0).setCurrentIsrVersion(0).
                    setLeaderEpoch(0).setNewIsr(asList(1, 2, 3)))))));
    assertEquals(new AlterIsrResponseData().setTopics(asList(
        new AlterIsrResponseData.TopicData().setName("foo").setPartitions(asList(
            new AlterIsrResponseData.PartitionData().setPartitionIndex(0).
                setLeaderId(2).setLeaderEpoch(0).setIsr(asList(1, 2, 3)).
                setCurrentIsrVersion(1).setErrorCode(NONE.code()))))),
        alterIsrResult.response());

    ElectLeadersResponseData expectedResponse2 = buildElectLeadersResponse(NONE, false, Utils.mkMap(
        Utils.mkEntry(new TopicPartition("foo", 0), ApiError.NONE),
        Utils.mkEntry(new TopicPartition("foo", 1), new ApiError(ELECTION_NOT_NEEDED)),
        Utils.mkEntry(new TopicPartition("bar", 0),
            new ApiError(UNKNOWN_TOPIC_OR_PARTITION, "No such topic as bar")),
        Utils.mkEntry(new TopicPartition("bar", 1),
            new ApiError(UNKNOWN_TOPIC_OR_PARTITION, "No such topic as bar"))));
    ctx.replay(alterIsrResult.records());
    ControllerResult<ElectLeadersResponseData> election2Result = replication.electLeaders(request1);
    assertElectLeadersResponse(expectedResponse2, election2Result.response());
    assertEquals(asList(new ApiMessageAndVersion(new PartitionChangeRecord().
        setPartitionId(0).setTopicId(fooId).setLeader(1), (short) 0)),
        election2Result.records());
}