Use of org.apache.kafka.common.message.BrokerRegistrationRequestData in project kafka by apache.
The class QuorumControllerTest, method registerBrokers.
private Map<Integer, Long> registerBrokers(QuorumController controller, int numBrokers) throws Exception {
    Map<Integer, Long> brokerEpochs = new HashMap<>();
    for (int brokerId = 0; brokerId < numBrokers; brokerId++) {
        BrokerRegistrationReply reply = controller.registerBroker(
            new BrokerRegistrationRequestData()
                .setBrokerId(brokerId)
                .setRack(null)
                .setClusterId(controller.clusterId())
                .setIncarnationId(Uuid.fromString("kxAT73dKQsitIedpiPtwB" + brokerId))
                .setListeners(new ListenerCollection(Arrays.asList(
                    new Listener()
                        .setName("PLAINTEXT")
                        .setHost("localhost")
                        .setPort(9092 + brokerId)).iterator()))).get();
        brokerEpochs.put(brokerId, reply.epoch());
        // Send heartbeat to unfence
        controller.processBrokerHeartbeat(
            new BrokerHeartbeatRequestData()
                .setWantFence(false)
                .setBrokerEpoch(brokerEpochs.get(brokerId))
                .setBrokerId(brokerId)
                .setCurrentMetadataOffset(100000L)).get();
    }
    return brokerEpochs;
}
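For reference, the registration half of the loop above boils down to the following minimal sketch. The incarnation id here is a hypothetical random value rather than the fixed strings used in the test, and controller is the QuorumController passed into the method.

// Register broker 1 listening on localhost:9093.
BrokerRegistrationReply reply = controller.registerBroker(
    new BrokerRegistrationRequestData()
        .setBrokerId(1)                              // node id of the broker being registered
        .setClusterId(controller.clusterId())        // must match the controller's cluster id
        .setIncarnationId(Uuid.randomUuid())         // unique per broker start-up (hypothetical value)
        .setRack(null)                               // no rack awareness in this sketch
        .setListeners(new ListenerCollection(Arrays.asList(
            new Listener().setName("PLAINTEXT").setHost("localhost").setPort(9093)).iterator()))).get();
long epoch = reply.epoch();                          // echoed back in later heartbeats for this broker

The epoch returned here is what the subsequent heartbeat passes via setBrokerEpoch to show it belongs to the current registration.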
Use of org.apache.kafka.common.message.BrokerRegistrationRequestData in project kafka by apache.
The class QuorumControllerTest, method testUnregisterBroker.
@Test
public void testUnregisterBroker() throws Throwable {
    try (LocalLogManagerTestEnv logEnv = new LocalLogManagerTestEnv(1, Optional.empty())) {
        try (QuorumControllerTestEnv controlEnv =
                new QuorumControllerTestEnv(logEnv, b -> b.setConfigDefs(CONFIGS))) {
            ListenerCollection listeners = new ListenerCollection();
            listeners.add(new Listener()
                .setName("PLAINTEXT")
                .setHost("localhost")
                .setPort(9092));
            QuorumController active = controlEnv.activeController();
            CompletableFuture<BrokerRegistrationReply> reply = active.registerBroker(
                new BrokerRegistrationRequestData()
                    .setBrokerId(0)
                    .setClusterId(active.clusterId())
                    .setIncarnationId(Uuid.fromString("kxAT73dKQsitIedpiPtwBA"))
                    .setListeners(listeners));
            assertEquals(0L, reply.get().epoch());
            CreateTopicsRequestData createTopicsRequestData = new CreateTopicsRequestData()
                .setTopics(new CreatableTopicCollection(Collections.singleton(
                    new CreatableTopic()
                        .setName("foo")
                        .setNumPartitions(1)
                        .setReplicationFactor((short) 1)).iterator()));
            assertEquals(Errors.INVALID_REPLICATION_FACTOR.code(),
                active.createTopics(createTopicsRequestData).get().topics().find("foo").errorCode());
            assertEquals("Unable to replicate the partition 1 time(s): All brokers " +
                "are currently fenced.",
                active.createTopics(createTopicsRequestData).get().topics().find("foo").errorMessage());
            assertEquals(new BrokerHeartbeatReply(true, false, false, false),
                active.processBrokerHeartbeat(new BrokerHeartbeatRequestData()
                    .setWantFence(false)
                    .setBrokerEpoch(0L)
                    .setBrokerId(0)
                    .setCurrentMetadataOffset(100000L)).get());
            assertEquals(Errors.NONE.code(),
                active.createTopics(createTopicsRequestData).get().topics().find("foo").errorCode());
            CompletableFuture<TopicIdPartition> topicPartitionFuture =
                active.appendReadEvent("debugGetPartition", () -> {
                    Iterator<TopicIdPartition> iterator =
                        active.replicationControl().brokersToIsrs().iterator(0, true);
                    assertTrue(iterator.hasNext());
                    return iterator.next();
                });
            assertEquals(0, topicPartitionFuture.get().partitionId());
            active.unregisterBroker(0).get();
            topicPartitionFuture = active.appendReadEvent("debugGetPartition", () -> {
                Iterator<TopicIdPartition> iterator =
                    active.replicationControl().brokersToIsrs().partitionsWithNoLeader();
                assertTrue(iterator.hasNext());
                return iterator.next();
            });
            assertEquals(0, topicPartitionFuture.get().partitionId());
        }
    }
}
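The assertions above hinge on the fencing lifecycle: a freshly registered broker is fenced, so topic creation fails with INVALID_REPLICATION_FACTOR until a heartbeat asks for the fence to be lifted. A minimal sketch of that unfencing heartbeat, assuming the active controller and the epoch 0L returned by the registration above:

// Heartbeat that requests unfencing; the broker reports how far it has replayed the metadata log.
BrokerHeartbeatReply heartbeatReply = active.processBrokerHeartbeat(
    new BrokerHeartbeatRequestData()
        .setBrokerId(0)
        .setBrokerEpoch(0L)                    // epoch from the BrokerRegistrationReply
        .setWantFence(false)                   // ask the controller to lift the fence
        .setCurrentMetadataOffset(100000L))    // metadata offset the broker has caught up to
    .get();
// The reply carries four status flags; the test above expects (true, false, false, false).

Once the broker is unfenced, the same CreateTopicsRequestData succeeds, and unregisterBroker(0) later removes the only replica, which is why the partition ends up in partitionsWithNoLeader().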
Use of org.apache.kafka.common.message.BrokerRegistrationRequestData in project kafka by apache.
The class QuorumControllerTest, method testDelayedConfigurationOperations.
/**
 * Test that an incrementalAlterConfigs operation doesn't complete until the records
 * can be written to the metadata log.
 */
@Test
public void testDelayedConfigurationOperations() throws Throwable {
    try (LocalLogManagerTestEnv logEnv = new LocalLogManagerTestEnv(1, Optional.empty());
         QuorumControllerTestEnv controlEnv =
             new QuorumControllerTestEnv(logEnv, b -> b.setConfigDefs(CONFIGS))) {
        controlEnv.activeController().registerBroker(
            new BrokerRegistrationRequestData()
                .setBrokerId(0)
                .setClusterId(logEnv.clusterId())).get();
        testDelayedConfigurationOperations(logEnv, controlEnv.activeController());
    }
}
Use of org.apache.kafka.common.message.BrokerRegistrationRequestData in project kafka by apache.
The class QuorumControllerTest, method testConfigurationOperations.
/**
 * Test setting some configuration values and reading them back.
 */
@Test
public void testConfigurationOperations() throws Throwable {
    try (LocalLogManagerTestEnv logEnv = new LocalLogManagerTestEnv(1, Optional.empty());
         QuorumControllerTestEnv controlEnv =
             new QuorumControllerTestEnv(logEnv, b -> b.setConfigDefs(CONFIGS))) {
        controlEnv.activeController().registerBroker(
            new BrokerRegistrationRequestData()
                .setBrokerId(0)
                .setClusterId(logEnv.clusterId())).get();
        testConfigurationOperations(controlEnv.activeController());
    }
}
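Both configuration tests above only need a registered broker to exist, so the request carries just the broker id and cluster id; rack, incarnation id, and listeners are left unset. A minimal sketch of that short form, assuming the controlEnv/logEnv pair from the tests:

// Shortest usable registration for tests that never unfence or address the broker again.
controlEnv.activeController().registerBroker(
    new BrokerRegistrationRequestData()
        .setBrokerId(0)                       // node id of the broker
        .setClusterId(logEnv.clusterId()))    // cluster id owned by the log test environment
    .get();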
Use of org.apache.kafka.common.message.BrokerRegistrationRequestData in project kafka by apache.
The class QuorumControllerTest, method testSnapshotConfiguration.
@Test
public void testSnapshotConfiguration() throws Throwable {
    final int numBrokers = 4;
    final int maxNewRecordBytes = 4;
    Map<Integer, Long> brokerEpochs = new HashMap<>();
    Uuid fooId;
    try (LocalLogManagerTestEnv logEnv = new LocalLogManagerTestEnv(3, Optional.empty())) {
        try (QuorumControllerTestEnv controlEnv = new QuorumControllerTestEnv(logEnv, builder -> {
            builder.setConfigDefs(CONFIGS).setSnapshotMaxNewRecordBytes(maxNewRecordBytes);
        })) {
            QuorumController active = controlEnv.activeController();
            for (int i = 0; i < numBrokers; i++) {
                BrokerRegistrationReply reply = active.registerBroker(
                    new BrokerRegistrationRequestData()
                        .setBrokerId(i)
                        .setRack(null)
                        .setClusterId(active.clusterId())
                        .setIncarnationId(Uuid.fromString("kxAT73dKQsitIedpiPtwB" + i))
                        .setListeners(new ListenerCollection(Arrays.asList(
                            new Listener()
                                .setName("PLAINTEXT")
                                .setHost("localhost")
                                .setPort(9092 + i)).iterator()))).get();
                brokerEpochs.put(i, reply.epoch());
            }
            for (int i = 0; i < numBrokers - 1; i++) {
                assertEquals(new BrokerHeartbeatReply(true, false, false, false),
                    active.processBrokerHeartbeat(new BrokerHeartbeatRequestData()
                        .setWantFence(false)
                        .setBrokerEpoch(brokerEpochs.get(i))
                        .setBrokerId(i)
                        .setCurrentMetadataOffset(100000L)).get());
            }
            CreateTopicsResponseData fooData = active.createTopics(
                new CreateTopicsRequestData().setTopics(new CreatableTopicCollection(Collections.singleton(
                    new CreatableTopic()
                        .setName("foo")
                        .setNumPartitions(-1)
                        .setReplicationFactor((short) -1)
                        .setAssignments(new CreatableReplicaAssignmentCollection(Arrays.asList(
                            new CreatableReplicaAssignment()
                                .setPartitionIndex(0)
                                .setBrokerIds(Arrays.asList(0, 1, 2)),
                            new CreatableReplicaAssignment()
                                .setPartitionIndex(1)
                                .setBrokerIds(Arrays.asList(1, 2, 0))).iterator()))).iterator()))).get();
            fooId = fooData.topics().find("foo").topicId();
            active.allocateProducerIds(
                new AllocateProducerIdsRequestData()
                    .setBrokerId(0)
                    .setBrokerEpoch(brokerEpochs.get(0))).get();
            SnapshotReader<ApiMessageAndVersion> snapshot =
                createSnapshotReader(logEnv.waitForLatestSnapshot());
            checkSnapshotSubcontent(expectedSnapshotContent(fooId, brokerEpochs), snapshot);
        }
    }
}
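One detail worth noting in this test: the epoch captured in brokerEpochs at registration time is echoed back in every later per-broker request, both in the heartbeats and in the producer-id allocation, so the controller can tell a current registration from a stale one. A minimal sketch of that pattern, reusing the active controller and brokerEpochs map from the test:

// Later requests from a broker carry the epoch it was given at registration.
long epoch = brokerEpochs.get(0);
active.allocateProducerIds(
    new AllocateProducerIdsRequestData()
        .setBrokerId(0)
        .setBrokerEpoch(epoch)).get();

Setting setSnapshotMaxNewRecordBytes(4) on the test builder forces the controller to snapshot after only a few bytes of new metadata records, which is why logEnv.waitForLatestSnapshot() is expected to return a snapshot containing the broker registrations and the topic "foo".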