
Example 1 with PartitionRecord

Use of org.apache.kafka.common.metadata.PartitionRecord in project kafka by apache.

From the class MetadataNodeManagerTest, method testPartitionRecord:

@Test
public void testPartitionRecord() {
    PartitionRecord record = new PartitionRecord()
        .setTopicId(Uuid.fromString("GcaQDl2UTsCNs1p9s37XkQ"))
        .setPartitionId(0)
        .setLeaderEpoch(1)
        .setReplicas(Arrays.asList(1, 2, 3))
        .setIsr(Arrays.asList(1, 2, 3));
    metadataNodeManager.handleMessage(record);
    assertEquals(
        PartitionRecordJsonConverter.write(record, PartitionRecord.HIGHEST_SUPPORTED_VERSION).toPrettyString(),
        metadataNodeManager.getData().root()
            .directory("topicIds", "GcaQDl2UTsCNs1p9s37XkQ", "0")
            .file("data")
            .contents());
}
Also used: PartitionRecord (org.apache.kafka.common.metadata.PartitionRecord), Test (org.junit.jupiter.api.Test)
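
As a standalone illustration (not taken from the test above), the same two calls shown in this example can be combined into a minimal sketch that renders a PartitionRecord as pretty-printed JSON; only classes and methods already used in the snippet are assumed:

import java.util.Arrays;

import org.apache.kafka.common.Uuid;
import org.apache.kafka.common.metadata.PartitionRecord;
import org.apache.kafka.common.metadata.PartitionRecordJsonConverter;

public class PartitionRecordJsonExample {
    public static void main(String[] args) {
        // Build a PartitionRecord the same way the test does.
        PartitionRecord record = new PartitionRecord()
            .setTopicId(Uuid.fromString("GcaQDl2UTsCNs1p9s37XkQ"))
            .setPartitionId(0)
            .setLeaderEpoch(1)
            .setReplicas(Arrays.asList(1, 2, 3))
            .setIsr(Arrays.asList(1, 2, 3));
        // Render it as pretty-printed JSON; this is the value the test compares
        // against the contents of the "data" file in the metadata node tree.
        String json = PartitionRecordJsonConverter
            .write(record, PartitionRecord.HIGHEST_SUPPORTED_VERSION)
            .toPrettyString();
        System.out.println(json);
    }
}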

Example 2 with PartitionRecord

Use of org.apache.kafka.common.metadata.PartitionRecord in project kafka by apache.

From the class TopicsImageTest, method testBasicLocalChanges:

@Test
public void testBasicLocalChanges() {
    int localId = 3;
    /* Changes already included in DELTA1_RECORDS and IMAGE1:
         * foo - topic id deleted
         * bar-0 - stay as follower with different partition epoch
         * baz-0 - new topic to leader
         */
    List<ApiMessageAndVersion> topicRecords = new ArrayList<>(DELTA1_RECORDS);
    // Create a new foo topic with a different id
    Uuid newFooId = Uuid.fromString("b66ybsWIQoygs01vdjH07A");
    topicRecords.add(new ApiMessageAndVersion(new TopicRecord().setName("foo").setTopicId(newFooId), TOPIC_RECORD.highestSupportedVersion()));
    topicRecords.add(newPartitionRecord(newFooId, 0, Arrays.asList(0, 1, 2)));
    topicRecords.add(newPartitionRecord(newFooId, 1, Arrays.asList(0, 1, localId)));
    // baz-1 - new partition to follower
    topicRecords.add(new ApiMessageAndVersion(new PartitionRecord()
        .setPartitionId(1)
        .setTopicId(BAZ_UUID)
        .setReplicas(Arrays.asList(4, 2, localId))
        .setIsr(Arrays.asList(4, 2, localId))
        .setLeader(4)
        .setLeaderEpoch(2)
        .setPartitionEpoch(1), PARTITION_RECORD.highestSupportedVersion()));
    TopicsDelta delta = new TopicsDelta(IMAGE1);
    RecordTestUtils.replayAll(delta, topicRecords);
    LocalReplicaChanges changes = delta.localChanges(localId);
    assertEquals(
        new HashSet<>(Arrays.asList(new TopicPartition("foo", 0), new TopicPartition("foo", 1))),
        changes.deletes());
    assertEquals(
        new HashSet<>(Arrays.asList(new TopicPartition("baz", 0))),
        changes.leaders().keySet());
    assertEquals(
        new HashSet<>(Arrays.asList(
            new TopicPartition("baz", 1),
            new TopicPartition("bar", 0),
            new TopicPartition("foo", 1))),
        changes.followers().keySet());
}
Also used: RemoveTopicRecord (org.apache.kafka.common.metadata.RemoveTopicRecord), TopicRecord (org.apache.kafka.common.metadata.TopicRecord), Uuid (org.apache.kafka.common.Uuid), ApiMessageAndVersion (org.apache.kafka.server.common.ApiMessageAndVersion), TopicPartition (org.apache.kafka.common.TopicPartition), ArrayList (java.util.ArrayList), PartitionRecord (org.apache.kafka.common.metadata.PartitionRecord), Test (org.junit.jupiter.api.Test)
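
The newPartitionRecord(...) helper called above is not shown on this page. A minimal hypothetical sketch of such a helper, assuming it simply wraps a PartitionRecord in an ApiMessageAndVersion; the leader, ISR, and epoch choices below are illustrative assumptions, not the test's actual implementation:

// Hypothetical sketch of the helper used above; the real implementation in
// TopicsImageTest is not shown here, so the field values are illustrative only.
private static ApiMessageAndVersion newPartitionRecord(Uuid topicId, int partitionId, List<Integer> replicas) {
    return new ApiMessageAndVersion(
        new PartitionRecord()
            .setPartitionId(partitionId)
            .setTopicId(topicId)
            .setReplicas(replicas)
            .setIsr(replicas)               // assume every replica is in sync
            .setLeader(replicas.get(0))     // assume the first replica leads
            .setLeaderEpoch(0)
            .setPartitionEpoch(0),
        PARTITION_RECORD.highestSupportedVersion());
}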

Example 3 with PartitionRecord

Use of org.apache.kafka.common.metadata.PartitionRecord in project kafka by apache.

From the class ReplicationControlManagerTest, method testCreateTopics:

@Test
public void testCreateTopics() throws Exception {
    ReplicationControlTestContext ctx = new ReplicationControlTestContext();
    ReplicationControlManager replicationControl = ctx.replicationControl;
    CreateTopicsRequestData request = new CreateTopicsRequestData();
    request.topics().add(new CreatableTopic().setName("foo").setNumPartitions(-1).setReplicationFactor((short) -1));
    ControllerResult<CreateTopicsResponseData> result = replicationControl.createTopics(request);
    CreateTopicsResponseData expectedResponse = new CreateTopicsResponseData();
    expectedResponse.topics().add(new CreatableTopicResult()
        .setName("foo")
        .setErrorCode(Errors.INVALID_REPLICATION_FACTOR.code())
        .setErrorMessage("Unable to replicate the partition 3 time(s): All " +
            "brokers are currently fenced."));
    assertEquals(expectedResponse, result.response());
    ctx.registerBrokers(0, 1, 2);
    ctx.unfenceBrokers(0, 1, 2);
    ControllerResult<CreateTopicsResponseData> result2 = replicationControl.createTopics(request);
    CreateTopicsResponseData expectedResponse2 = new CreateTopicsResponseData();
    expectedResponse2.topics().add(new CreatableTopicResult()
        .setName("foo")
        .setNumPartitions(1)
        .setReplicationFactor((short) 3)
        .setErrorMessage(null)
        .setErrorCode((short) 0)
        .setTopicId(result2.response().topics().find("foo").topicId()));
    assertEquals(expectedResponse2, result2.response());
    ctx.replay(result2.records());
    assertEquals(
        new PartitionRegistration(new int[] { 1, 2, 0 }, new int[] { 1, 2, 0 },
            Replicas.NONE, Replicas.NONE, 1, 0, 0),
        replicationControl.getPartition(
            ((TopicRecord) result2.records().get(0).message()).topicId(), 0));
    ControllerResult<CreateTopicsResponseData> result3 = replicationControl.createTopics(request);
    CreateTopicsResponseData expectedResponse3 = new CreateTopicsResponseData();
    expectedResponse3.topics().add(new CreatableTopicResult()
        .setName("foo")
        .setErrorCode(Errors.TOPIC_ALREADY_EXISTS.code())
        .setErrorMessage("Topic 'foo' already exists."));
    assertEquals(expectedResponse3, result3.response());
    Uuid fooId = result2.response().topics().find("foo").topicId();
    RecordTestUtils.assertBatchIteratorContains(asList(asList(
        new ApiMessageAndVersion(new PartitionRecord()
            .setPartitionId(0)
            .setTopicId(fooId)
            .setReplicas(asList(1, 2, 0))
            .setIsr(asList(1, 2, 0))
            .setRemovingReplicas(Collections.emptyList())
            .setAddingReplicas(Collections.emptyList())
            .setLeader(1)
            .setLeaderEpoch(0)
            .setPartitionEpoch(0), (short) 0),
        new ApiMessageAndVersion(new TopicRecord()
            .setTopicId(fooId)
            .setName("foo"), (short) 0))),
        ctx.replicationControl.iterator(Long.MAX_VALUE));
}
Also used: PartitionRegistration (org.apache.kafka.metadata.PartitionRegistration), TopicRecord (org.apache.kafka.common.metadata.TopicRecord), Uuid (org.apache.kafka.common.Uuid), CreatableTopic (org.apache.kafka.common.message.CreateTopicsRequestData.CreatableTopic), CreateTopicsRequestData (org.apache.kafka.common.message.CreateTopicsRequestData), ApiMessageAndVersion (org.apache.kafka.server.common.ApiMessageAndVersion), PartitionRecord (org.apache.kafka.common.metadata.PartitionRecord), CreatableTopicResult (org.apache.kafka.common.message.CreateTopicsResponseData.CreatableTopicResult), CreateTopicsResponseData (org.apache.kafka.common.message.CreateTopicsResponseData), ParameterizedTest (org.junit.jupiter.params.ParameterizedTest), Test (org.junit.jupiter.api.Test)
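
For readability, the positional arguments of the PartitionRegistration constructor call above can be annotated as follows; the labels are inferred from the values this test uses and are offered as a reading aid, not as the authoritative signature:

// Annotated reading of the constructor call above (labels inferred, not authoritative):
new PartitionRegistration(
    new int[] { 1, 2, 0 },   // replicas
    new int[] { 1, 2, 0 },   // in-sync replicas (ISR)
    Replicas.NONE,           // removing replicas
    Replicas.NONE,           // adding replicas
    1,                       // leader
    0,                       // leader epoch
    0);                      // partition epoch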

Example 4 with PartitionRecord

Use of org.apache.kafka.common.metadata.PartitionRecord in project kafka by apache.

From the class ReplicationControlManager, method createPartitions:

void createPartitions(CreatePartitionsTopic topic, List<ApiMessageAndVersion> records) {
    Uuid topicId = topicsByName.get(topic.name());
    if (topicId == null) {
        throw new UnknownTopicOrPartitionException();
    }
    TopicControlInfo topicInfo = topics.get(topicId);
    if (topicInfo == null) {
        throw new UnknownTopicOrPartitionException();
    }
    if (topic.count() == topicInfo.parts.size()) {
        throw new InvalidPartitionsException("Topic already has " + topicInfo.parts.size() + " partition(s).");
    } else if (topic.count() < topicInfo.parts.size()) {
        throw new InvalidPartitionsException("The topic " + topic.name() + " currently " + "has " + topicInfo.parts.size() + " partition(s); " + topic.count() + " would not be an increase.");
    }
    int additional = topic.count() - topicInfo.parts.size();
    if (topic.assignments() != null) {
        if (topic.assignments().size() != additional) {
            throw new InvalidReplicaAssignmentException("Attempted to add " + additional + " additional partition(s), but only " + topic.assignments().size() + " assignment(s) were specified.");
        }
    }
    Iterator<PartitionRegistration> iterator = topicInfo.parts.values().iterator();
    if (!iterator.hasNext()) {
        throw new UnknownServerException("Invalid state: topic " + topic.name() + " appears to have no partitions.");
    }
    PartitionRegistration partitionInfo = iterator.next();
    if (partitionInfo.replicas.length > Short.MAX_VALUE) {
        throw new UnknownServerException("Invalid replication factor " + partitionInfo.replicas.length + ": expected a number equal to less than " + Short.MAX_VALUE);
    }
    short replicationFactor = (short) partitionInfo.replicas.length;
    int startPartitionId = topicInfo.parts.size();
    List<List<Integer>> placements;
    List<List<Integer>> isrs;
    if (topic.assignments() != null) {
        placements = new ArrayList<>();
        isrs = new ArrayList<>();
        for (int i = 0; i < topic.assignments().size(); i++) {
            CreatePartitionsAssignment assignment = topic.assignments().get(i);
            validateManualPartitionAssignment(assignment.brokerIds(), OptionalInt.of(replicationFactor));
            placements.add(assignment.brokerIds());
            List<Integer> isr = assignment.brokerIds().stream().filter(clusterControl::unfenced).collect(Collectors.toList());
            if (isr.isEmpty()) {
                throw new InvalidReplicaAssignmentException("All brokers specified in the manual partition assignment for " + "partition " + (startPartitionId + i) + " are fenced.");
            }
            isrs.add(isr);
        }
    } else {
        placements = clusterControl.placeReplicas(startPartitionId, additional, replicationFactor);
        isrs = placements;
    }
    int partitionId = startPartitionId;
    for (int i = 0; i < placements.size(); i++) {
        List<Integer> placement = placements.get(i);
        List<Integer> isr = isrs.get(i);
        records.add(new ApiMessageAndVersion(new PartitionRecord()
            .setPartitionId(partitionId)
            .setTopicId(topicId)
            .setReplicas(placement)
            .setIsr(isr)
            .setRemovingReplicas(Collections.emptyList())
            .setAddingReplicas(Collections.emptyList())
            .setLeader(isr.get(0))
            .setLeaderEpoch(0)
            .setPartitionEpoch(0), PARTITION_RECORD.highestSupportedVersion()));
        partitionId++;
    }
}
Also used: PartitionRegistration (org.apache.kafka.metadata.PartitionRegistration), UnknownTopicOrPartitionException (org.apache.kafka.common.errors.UnknownTopicOrPartitionException), PartitionRecord (org.apache.kafka.common.metadata.PartitionRecord), InvalidPartitionsException (org.apache.kafka.common.errors.InvalidPartitionsException), UnknownServerException (org.apache.kafka.common.errors.UnknownServerException), TimelineInteger (org.apache.kafka.timeline.TimelineInteger), Uuid (org.apache.kafka.common.Uuid), CreatePartitionsAssignment (org.apache.kafka.common.message.CreatePartitionsRequestData.CreatePartitionsAssignment), ApiMessageAndVersion (org.apache.kafka.server.common.ApiMessageAndVersion), ArrayList (java.util.ArrayList), List (java.util.List), InvalidReplicaAssignmentException (org.apache.kafka.common.errors.InvalidReplicaAssignmentException)
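
The manual-assignment branch above derives each new partition's ISR by keeping only the unfenced brokers from the requested assignment. A self-contained sketch of that filtering step follows; the fencedBrokers set is a stand-in for clusterControl::unfenced, which is not part of this snippet:

import java.util.Arrays;
import java.util.HashSet;
import java.util.List;
import java.util.Set;
import java.util.stream.Collectors;

// Standalone illustration of the ISR computation in the manual-assignment branch above.
public class IsrFromAssignmentExample {
    public static void main(String[] args) {
        List<Integer> assignedBrokers = Arrays.asList(1, 2, 3);
        // Stand-in for the broker fencing state tracked by clusterControl (assumption).
        Set<Integer> fencedBrokers = new HashSet<>(Arrays.asList(3));
        List<Integer> isr = assignedBrokers.stream()
            .filter(broker -> !fencedBrokers.contains(broker))
            .collect(Collectors.toList());
        if (isr.isEmpty()) {
            // Mirrors the InvalidReplicaAssignmentException thrown above when every
            // assigned broker is fenced.
            throw new IllegalStateException("All brokers in the manual assignment are fenced.");
        }
        System.out.println(isr); // prints [1, 2]
    }
}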

Example 5 with PartitionRecord

Use of org.apache.kafka.common.metadata.PartitionRecord in project kafka by apache.

From the class MetadataNodeManagerTest, method testPartitionChangeRecord:

@Test
public void testPartitionChangeRecord() {
    PartitionRecord oldPartitionRecord = new PartitionRecord()
        .setTopicId(Uuid.fromString("GcaQDl2UTsCNs1p9s37XkQ"))
        .setPartitionId(0)
        .setPartitionEpoch(0)
        .setLeader(0)
        .setLeaderEpoch(0)
        .setIsr(Arrays.asList(0, 1, 2))
        .setReplicas(Arrays.asList(0, 1, 2));
    PartitionChangeRecord partitionChangeRecord = new PartitionChangeRecord()
        .setTopicId(Uuid.fromString("GcaQDl2UTsCNs1p9s37XkQ"))
        .setPartitionId(0)
        .setLeader(NO_LEADER_CHANGE)
        .setReplicas(Arrays.asList(0, 1, 2));
    PartitionRecord newPartitionRecord = new PartitionRecord()
        .setTopicId(Uuid.fromString("GcaQDl2UTsCNs1p9s37XkQ"))
        .setPartitionId(0)
        .setPartitionEpoch(1)
        .setLeader(0)
        .setLeaderEpoch(0)
        .setIsr(Arrays.asList(0, 1, 2))
        .setReplicas(Arrays.asList(0, 1, 2));
    // Change nothing
    checkPartitionChangeRecord(oldPartitionRecord, partitionChangeRecord, newPartitionRecord);
    // Change isr
    checkPartitionChangeRecord(
        oldPartitionRecord,
        partitionChangeRecord.duplicate().setIsr(Arrays.asList(0, 2)),
        newPartitionRecord.duplicate().setIsr(Arrays.asList(0, 2)));
    // Change leader
    checkPartitionChangeRecord(
        oldPartitionRecord,
        partitionChangeRecord.duplicate().setLeader(1),
        newPartitionRecord.duplicate().setLeader(1).setLeaderEpoch(1));
}
Also used: PartitionChangeRecord (org.apache.kafka.common.metadata.PartitionChangeRecord), PartitionRecord (org.apache.kafka.common.metadata.PartitionRecord), Test (org.junit.jupiter.api.Test)

Aggregations

PartitionRecord (org.apache.kafka.common.metadata.PartitionRecord): 5
Test (org.junit.jupiter.api.Test): 4
Uuid (org.apache.kafka.common.Uuid): 3
ApiMessageAndVersion (org.apache.kafka.server.common.ApiMessageAndVersion): 3
ArrayList (java.util.ArrayList): 2
TopicRecord (org.apache.kafka.common.metadata.TopicRecord): 2
PartitionRegistration (org.apache.kafka.metadata.PartitionRegistration): 2
List (java.util.List): 1
TopicPartition (org.apache.kafka.common.TopicPartition): 1
InvalidPartitionsException (org.apache.kafka.common.errors.InvalidPartitionsException): 1
InvalidReplicaAssignmentException (org.apache.kafka.common.errors.InvalidReplicaAssignmentException): 1
UnknownServerException (org.apache.kafka.common.errors.UnknownServerException): 1
UnknownTopicOrPartitionException (org.apache.kafka.common.errors.UnknownTopicOrPartitionException): 1
CreatePartitionsAssignment (org.apache.kafka.common.message.CreatePartitionsRequestData.CreatePartitionsAssignment): 1
CreateTopicsRequestData (org.apache.kafka.common.message.CreateTopicsRequestData): 1
CreatableTopic (org.apache.kafka.common.message.CreateTopicsRequestData.CreatableTopic): 1
CreateTopicsResponseData (org.apache.kafka.common.message.CreateTopicsResponseData): 1
CreatableTopicResult (org.apache.kafka.common.message.CreateTopicsResponseData.CreatableTopicResult): 1
PartitionChangeRecord (org.apache.kafka.common.metadata.PartitionChangeRecord): 1
RemoveTopicRecord (org.apache.kafka.common.metadata.RemoveTopicRecord): 1