Search in sources:

Example 11 with LocalLogManagerTestEnv

Use of org.apache.kafka.metalog.LocalLogManagerTestEnv in the Apache Kafka project.

From the class QuorumControllerTest, method testFenceMultipleBrokers.

@Test
public void testFenceMultipleBrokers() throws Throwable {
    final List<Integer> allBrokers = Arrays.asList(1, 2, 3, 4, 5);
    final List<Integer> brokersToKeepUnfenced = Arrays.asList(1);
    final List<Integer> brokersToFence = Arrays.asList(2, 3, 4, 5);
    final short replicationFactor = 5;
    final long sessionTimeoutMillis = 1000;
    try (LocalLogManagerTestEnv logEnv = new LocalLogManagerTestEnv(1, Optional.empty());
        QuorumControllerTestEnv controlEnv = new QuorumControllerTestEnv(
                logEnv, b -> b.setConfigDefs(CONFIGS), Optional.of(sessionTimeoutMillis))) {
        ListenerCollection listeners = new ListenerCollection();
        listeners.add(new Listener().setName("PLAINTEXT").setHost("localhost").setPort(9092));
        QuorumController active = controlEnv.activeController();
        Map<Integer, Long> brokerEpochs = new HashMap<>();
        // Register every broker and remember the epoch each registration returns;
        // the epoch is needed later to send valid heartbeats.
        for (Integer brokerId : allBrokers) {
            BrokerRegistrationRequestData registration = new BrokerRegistrationRequestData()
                .setBrokerId(brokerId)
                .setClusterId(active.clusterId())
                .setIncarnationId(Uuid.randomUuid())
                .setListeners(listeners);
            brokerEpochs.put(brokerId, active.registerBroker(registration).get().epoch());
        }
        // Registration alone must not unfence a broker.
        for (Integer brokerId : allBrokers) {
            assertFalse(active.replicationControl().isBrokerUnfenced(brokerId), "Broker " + brokerId + " should have been fenced");
        }
        // A heartbeat from every broker unfences them all; then create topic "foo".
        sendBrokerheartbeat(active, allBrokers, brokerEpochs);
        CreatableTopic fooTopic = new CreatableTopic()
            .setName("foo")
            .setNumPartitions(1)
            .setReplicationFactor(replicationFactor);
        CreateTopicsRequestData createRequest = new CreateTopicsRequestData()
            .setTopics(new CreatableTopicCollection(Collections.singleton(fooTopic).iterator()));
        CreateTopicsResponseData createResponse = active.createTopics(createRequest).get();
        assertEquals(Errors.NONE, Errors.forCode(createResponse.topics().find("foo").errorCode()));
        Uuid topicIdFoo = createResponse.topics().find("foo").topicId();
        // Keep heartbeating only the surviving brokers and wait for the session
        // timeout to fence the rest.
        TestUtils.waitForCondition(() -> {
            sendBrokerheartbeat(active, brokersToKeepUnfenced, brokerEpochs);
            return brokersToFence.stream()
                .noneMatch(brokerId -> active.replicationControl().isBrokerUnfenced(brokerId));
        }, sessionTimeoutMillis * 3, "Fencing of brokers did not process within expected time");
        // Send another heartbeat to the brokers we want to keep alive
        sendBrokerheartbeat(active, brokersToKeepUnfenced, brokerEpochs);
        // At this point exactly the brokers in brokersToFence should be fenced.
        for (Integer brokerId : brokersToKeepUnfenced) {
            assertTrue(active.replicationControl().isBrokerUnfenced(brokerId), "Broker " + brokerId + " should have been unfenced");
        }
        for (Integer brokerId : brokersToFence) {
            assertFalse(active.replicationControl().isBrokerUnfenced(brokerId), "Broker " + brokerId + " should have been fenced");
        }
        // Only broker 1 should remain in the ISR of foo-0 and be its leader.
        int[] expectedIsr = { 1 };
        int[] isrFoo = active.replicationControl().getPartition(topicIdFoo, 0).isr;
        assertTrue(Arrays.equals(isrFoo, expectedIsr), "The ISR for topic foo was " + Arrays.toString(isrFoo) + ". It is expected to be " + Arrays.toString(expectedIsr));
        int fooLeader = active.replicationControl().getPartition(topicIdFoo, 0).leader;
        assertEquals(expectedIsr[0], fooLeader);
    }
}
Also used : ListenerCollection(org.apache.kafka.common.message.BrokerRegistrationRequestData.ListenerCollection) LocalLogManagerTestEnv(org.apache.kafka.metalog.LocalLogManagerTestEnv) Listener(org.apache.kafka.common.message.BrokerRegistrationRequestData.Listener) HashMap(java.util.HashMap) BrokerRegistrationRequestData(org.apache.kafka.common.message.BrokerRegistrationRequestData) BrokerRegistrationReply(org.apache.kafka.metadata.BrokerRegistrationReply) CreateTopicsResponseData(org.apache.kafka.common.message.CreateTopicsResponseData) BrokerEndpoint(org.apache.kafka.common.metadata.RegisterBrokerRecord.BrokerEndpoint) CreatableTopicCollection(org.apache.kafka.common.message.CreateTopicsRequestData.CreatableTopicCollection) Uuid(org.apache.kafka.common.Uuid) CreatableTopic(org.apache.kafka.common.message.CreateTopicsRequestData.CreatableTopic) CreateTopicsRequestData(org.apache.kafka.common.message.CreateTopicsRequestData) Test(org.junit.jupiter.api.Test)

Example 12 with LocalLogManagerTestEnv

Use of org.apache.kafka.metalog.LocalLogManagerTestEnv in the Apache Kafka project.

From the class QuorumControllerTest, method testSnapshotOnlyAfterConfiguredMinBytes.

@Test
public void testSnapshotOnlyAfterConfiguredMinBytes() throws Throwable {
    final int numBrokers = 4;
    final int maxNewRecordBytes = 1000;
    Map<Integer, Long> brokerEpochs = new HashMap<>();
    // A single try-with-resources still closes controlEnv before logEnv
    // (resources are closed in reverse declaration order).
    try (LocalLogManagerTestEnv logEnv = new LocalLogManagerTestEnv(3, Optional.empty());
        QuorumControllerTestEnv controlEnv = new QuorumControllerTestEnv(logEnv,
                builder -> builder.setConfigDefs(CONFIGS).setSnapshotMaxNewRecordBytes(maxNewRecordBytes))) {
        QuorumController active = controlEnv.activeController();
        // Register numBrokers brokers and unfence each with a heartbeat.
        for (int brokerId = 0; brokerId < numBrokers; brokerId++) {
            ListenerCollection listeners = new ListenerCollection(Arrays.asList(
                new Listener().setName("PLAINTEXT").setHost("localhost").setPort(9092 + brokerId)).iterator());
            BrokerRegistrationRequestData registration = new BrokerRegistrationRequestData()
                .setBrokerId(brokerId)
                .setRack(null)
                .setClusterId(active.clusterId())
                .setIncarnationId(Uuid.fromString("kxAT73dKQsitIedpiPtwB" + brokerId))
                .setListeners(listeners);
            BrokerRegistrationReply reply = active.registerBroker(registration).get();
            brokerEpochs.put(brokerId, reply.epoch());
            BrokerHeartbeatRequestData heartbeat = new BrokerHeartbeatRequestData()
                .setWantFence(false)
                .setBrokerEpoch(brokerEpochs.get(brokerId))
                .setBrokerId(brokerId)
                .setCurrentMetadataOffset(100000L);
            assertEquals(new BrokerHeartbeatReply(true, false, false, false), active.processBrokerHeartbeat(heartbeat).get());
        }
        assertTrue(logEnv.appendedBytes() < maxNewRecordBytes, String.format("%s appended bytes is not less than %s max new record bytes", logEnv.appendedBytes(), maxNewRecordBytes));
        // Keep creating topic until we reached the max bytes limit
        int counter = 0;
        while (logEnv.appendedBytes() < maxNewRecordBytes) {
            counter += 1;
            String topicName = String.format("foo-%s", counter);
            CreatableReplicaAssignmentCollection assignments = new CreatableReplicaAssignmentCollection(Arrays.asList(
                new CreatableReplicaAssignment().setPartitionIndex(0).setBrokerIds(Arrays.asList(0, 1, 2)),
                new CreatableReplicaAssignment().setPartitionIndex(1).setBrokerIds(Arrays.asList(1, 2, 0))).iterator());
            CreatableTopic topic = new CreatableTopic()
                .setName(topicName)
                .setNumPartitions(-1)
                .setReplicationFactor((short) -1)
                .setAssignments(assignments);
            active.createTopics(new CreateTopicsRequestData()
                .setTopics(new CreatableTopicCollection(Collections.singleton(topic).iterator()))).get();
        }
        // Crossing the byte threshold should trigger a snapshot.
        logEnv.waitForLatestSnapshot();
    }
}
Also used : BrokerHeartbeatReply(org.apache.kafka.metadata.BrokerHeartbeatReply) ListenerCollection(org.apache.kafka.common.message.BrokerRegistrationRequestData.ListenerCollection) LocalLogManagerTestEnv(org.apache.kafka.metalog.LocalLogManagerTestEnv) Listener(org.apache.kafka.common.message.BrokerRegistrationRequestData.Listener) HashMap(java.util.HashMap) BrokerRegistrationRequestData(org.apache.kafka.common.message.BrokerRegistrationRequestData) BrokerRegistrationReply(org.apache.kafka.metadata.BrokerRegistrationReply) BrokerEndpoint(org.apache.kafka.common.metadata.RegisterBrokerRecord.BrokerEndpoint) CreatableTopicCollection(org.apache.kafka.common.message.CreateTopicsRequestData.CreatableTopicCollection) BrokerHeartbeatRequestData(org.apache.kafka.common.message.BrokerHeartbeatRequestData) CreatableTopic(org.apache.kafka.common.message.CreateTopicsRequestData.CreatableTopic) CreatableReplicaAssignment(org.apache.kafka.common.message.CreateTopicsRequestData.CreatableReplicaAssignment) CreateTopicsRequestData(org.apache.kafka.common.message.CreateTopicsRequestData) CreatableReplicaAssignmentCollection(org.apache.kafka.common.message.CreateTopicsRequestData.CreatableReplicaAssignmentCollection) Test(org.junit.jupiter.api.Test)

Aggregations

LocalLogManagerTestEnv (org.apache.kafka.metalog.LocalLogManagerTestEnv)12 Test (org.junit.jupiter.api.Test)12 CreateTopicsRequestData (org.apache.kafka.common.message.CreateTopicsRequestData)9 BrokerRegistrationRequestData (org.apache.kafka.common.message.BrokerRegistrationRequestData)8 CreatableTopic (org.apache.kafka.common.message.CreateTopicsRequestData.CreatableTopic)8 CreatableTopicCollection (org.apache.kafka.common.message.CreateTopicsRequestData.CreatableTopicCollection)8 HashMap (java.util.HashMap)7 Uuid (org.apache.kafka.common.Uuid)6 Listener (org.apache.kafka.common.message.BrokerRegistrationRequestData.Listener)6 ListenerCollection (org.apache.kafka.common.message.BrokerRegistrationRequestData.ListenerCollection)6 CreateTopicsResponseData (org.apache.kafka.common.message.CreateTopicsResponseData)6 BrokerRegistrationReply (org.apache.kafka.metadata.BrokerRegistrationReply)6 BrokerHeartbeatRequestData (org.apache.kafka.common.message.BrokerHeartbeatRequestData)5 BrokerHeartbeatReply (org.apache.kafka.metadata.BrokerHeartbeatReply)5 BrokerEndpoint (org.apache.kafka.common.metadata.RegisterBrokerRecord.BrokerEndpoint)4 List (java.util.List)3 Map (java.util.Map)3 CountDownLatch (java.util.concurrent.CountDownLatch)3 AlterPartitionReassignmentsRequestData (org.apache.kafka.common.message.AlterPartitionReassignmentsRequestData)3 AlterPartitionReassignmentsResponseData (org.apache.kafka.common.message.AlterPartitionReassignmentsResponseData)3