Use of org.apache.kafka.common.metadata.RegisterBrokerRecord.BrokerEndpoint in the Apache Kafka project.
In the class BrokerRegistration, the method fromRecord:
public static BrokerRegistration fromRecord(RegisterBrokerRecord record) {
    // Convert each BrokerEndpoint in the record into an Endpoint, keyed by listener name.
    Map<String, Endpoint> listeners = new HashMap<>();
    for (BrokerEndpoint endpoint : record.endPoints()) {
        listeners.put(endpoint.name(), new Endpoint(endpoint.name(),
            SecurityProtocol.forId(endpoint.securityProtocol()),
            endpoint.host(), endpoint.port()));
    }
    // Convert each BrokerFeature into a supported VersionRange, keyed by feature name.
    Map<String, VersionRange> supportedFeatures = new HashMap<>();
    for (BrokerFeature feature : record.features()) {
        supportedFeatures.put(feature.name(),
            new VersionRange(feature.minSupportedVersion(), feature.maxSupportedVersion()));
    }
    return new BrokerRegistration(record.brokerId(), record.brokerEpoch(),
        record.incarnationId(), listeners, supportedFeatures,
        Optional.ofNullable(record.rack()), record.fenced());
}
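The snippet below is a minimal usage sketch, not taken from the Kafka sources: it builds a RegisterBrokerRecord with the same setters the tests further down use, then converts it with fromRecord. The broker id, epoch, rack, and endpoint values are illustrative only.

RegisterBrokerRecord record = new RegisterBrokerRecord().
    setBrokerId(1).setBrokerEpoch(100).
    setIncarnationId(Uuid.fromString("fPZv1VBsRFmnlRvmGcOW9w")).setRack("arack");
record.endPoints().add(new BrokerEndpoint().
    setSecurityProtocol(SecurityProtocol.PLAINTEXT.id).
    setHost("example.com").setPort((short) 9092).setName("PLAINTEXT"));
// fromRecord copies the id, epoch, incarnation id, rack, endpoints, and fenced
// flag from the record into the resulting BrokerRegistration.
BrokerRegistration registration = BrokerRegistration.fromRecord(record);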
Use of org.apache.kafka.common.metadata.RegisterBrokerRecord.BrokerEndpoint in the Apache Kafka project.
In the class ClusterControlManagerTest, the method testIterator:
@Test
public void testIterator() throws Exception {
    MockTime time = new MockTime(0, 0, 0);
    SnapshotRegistry snapshotRegistry = new SnapshotRegistry(new LogContext());
    ClusterControlManager clusterControl = new ClusterControlManager(new LogContext(),
        Uuid.randomUuid().toString(), time, snapshotRegistry, 1000,
        new StripedReplicaPlacer(new Random()), new MockControllerMetrics());
    clusterControl.activate();
    assertFalse(clusterControl.unfenced(0));
    // Register three brokers, each with a single PLAINTEXT endpoint on a distinct port.
    for (int i = 0; i < 3; i++) {
        RegisterBrokerRecord brokerRecord = new RegisterBrokerRecord().
            setBrokerEpoch(100).setBrokerId(i).setRack(null);
        brokerRecord.endPoints().add(new BrokerEndpoint().
            setSecurityProtocol(SecurityProtocol.PLAINTEXT.id).setPort((short) 9092 + i).
            setName("PLAINTEXT").setHost("example.com"));
        clusterControl.replay(brokerRecord);
    }
    // Unfence only the first two brokers; broker 2 stays fenced.
    for (int i = 0; i < 2; i++) {
        UnfenceBrokerRecord unfenceBrokerRecord = new UnfenceBrokerRecord().setId(i).setEpoch(100);
        clusterControl.replay(unfenceBrokerRecord);
    }
    // The snapshot iterator should emit one batch per registered broker.
    RecordTestUtils.assertBatchIteratorContains(Arrays.asList(
        Arrays.asList(new ApiMessageAndVersion(new RegisterBrokerRecord().
            setBrokerEpoch(100).setBrokerId(0).setRack(null).
            setEndPoints(new BrokerEndpointCollection(Collections.singleton(new BrokerEndpoint().
                setSecurityProtocol(SecurityProtocol.PLAINTEXT.id).setPort((short) 9092).
                setName("PLAINTEXT").setHost("example.com")).iterator())).
            setFenced(false), (short) 0)),
        Arrays.asList(new ApiMessageAndVersion(new RegisterBrokerRecord().
            setBrokerEpoch(100).setBrokerId(1).setRack(null).
            setEndPoints(new BrokerEndpointCollection(Collections.singleton(new BrokerEndpoint().
                setSecurityProtocol(SecurityProtocol.PLAINTEXT.id).setPort((short) 9093).
                setName("PLAINTEXT").setHost("example.com")).iterator())).
            setFenced(false), (short) 0)),
        Arrays.asList(new ApiMessageAndVersion(new RegisterBrokerRecord().
            setBrokerEpoch(100).setBrokerId(2).setRack(null).
            setEndPoints(new BrokerEndpointCollection(Collections.singleton(new BrokerEndpoint().
                setSecurityProtocol(SecurityProtocol.PLAINTEXT.id).setPort((short) 9094).
                setName("PLAINTEXT").setHost("example.com")).iterator())).
            setFenced(true), (short) 0))),
        clusterControl.iterator(Long.MAX_VALUE));
}
Use of org.apache.kafka.common.metadata.RegisterBrokerRecord.BrokerEndpoint in the Apache Kafka project.
In the class ClusterControlManagerTest, the method testPlaceReplicas:
@ParameterizedTest
@ValueSource(ints = { 3, 10 })
public void testPlaceReplicas(int numUsableBrokers) throws Exception {
    MockTime time = new MockTime(0, 0, 0);
    SnapshotRegistry snapshotRegistry = new SnapshotRegistry(new LogContext());
    MockRandom random = new MockRandom();
    ClusterControlManager clusterControl = new ClusterControlManager(new LogContext(),
        Uuid.randomUuid().toString(), time, snapshotRegistry, 1000,
        new StripedReplicaPlacer(random), new MockControllerMetrics());
    clusterControl.activate();
    // Register, unfence, and heartbeat numUsableBrokers brokers so they are usable for placement.
    for (int i = 0; i < numUsableBrokers; i++) {
        RegisterBrokerRecord brokerRecord = new RegisterBrokerRecord().
            setBrokerEpoch(100).setBrokerId(i);
        brokerRecord.endPoints().add(new BrokerEndpoint().
            setSecurityProtocol(SecurityProtocol.PLAINTEXT.id).setPort((short) 9092).
            setName("PLAINTEXT").setHost("example.com"));
        clusterControl.replay(brokerRecord);
        UnfenceBrokerRecord unfenceRecord = new UnfenceBrokerRecord().setId(i).setEpoch(100);
        clusterControl.replay(unfenceRecord);
        clusterControl.heartbeatManager().touch(i, false, 0);
    }
    for (int i = 0; i < numUsableBrokers; i++) {
        assertTrue(clusterControl.unfenced(i), String.format("broker %d was not unfenced.", i));
    }
    // Every placement must consist of three distinct broker ids drawn from the registered range.
    for (int i = 0; i < 100; i++) {
        List<List<Integer>> results = clusterControl.placeReplicas(0, 1, (short) 3);
        HashSet<Integer> seen = new HashSet<>();
        for (Integer result : results.get(0)) {
            assertTrue(result >= 0);
            assertTrue(result < numUsableBrokers);
            assertTrue(seen.add(result));
        }
    }
}
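As a hedged follow-up sketch, not part of the test above, and assuming the three arguments to placeReplicas are the starting partition, the partition count, and the replication factor (which the single-partition call above suggests), a multi-partition placement could be inspected like this:

// Illustrative only: reuse the clusterControl instance from the test, request two
// partitions with replication factor 3, and print each partition's replica assignment.
List<List<Integer>> assignment = clusterControl.placeReplicas(0, 2, (short) 3);
for (int partition = 0; partition < assignment.size(); partition++) {
    System.out.println("partition " + partition + " -> brokers " + assignment.get(partition));
}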
Use of org.apache.kafka.common.metadata.RegisterBrokerRecord.BrokerEndpoint in the Apache Kafka project.
In the class ClusterControlManagerTest, the method testReplay:
@Test
public void testReplay() {
    MockTime time = new MockTime(0, 0, 0);
    SnapshotRegistry snapshotRegistry = new SnapshotRegistry(new LogContext());
    ClusterControlManager clusterControl = new ClusterControlManager(new LogContext(),
        Uuid.randomUuid().toString(), time, snapshotRegistry, 1000,
        new StripedReplicaPlacer(new Random()), new MockControllerMetrics());
    clusterControl.activate();
    assertFalse(clusterControl.unfenced(0));
    // Register broker 1 with epoch 100; it starts out fenced.
    RegisterBrokerRecord brokerRecord = new RegisterBrokerRecord().setBrokerEpoch(100).setBrokerId(1);
    brokerRecord.endPoints().add(new BrokerEndpoint().
        setSecurityProtocol(SecurityProtocol.PLAINTEXT.id).setPort((short) 9092).
        setName("PLAINTEXT").setHost("example.com"));
    clusterControl.replay(brokerRecord);
    // checkBrokerEpoch rejects an epoch that does not match the registration, and an unknown broker id.
    clusterControl.checkBrokerEpoch(1, 100);
    assertThrows(StaleBrokerEpochException.class, () -> clusterControl.checkBrokerEpoch(1, 101));
    assertThrows(StaleBrokerEpochException.class, () -> clusterControl.checkBrokerEpoch(2, 100));
    assertFalse(clusterControl.unfenced(0));
    assertFalse(clusterControl.unfenced(1));
    // Replaying the UnfenceBrokerRecord unfences broker 1 only.
    UnfenceBrokerRecord unfenceBrokerRecord = new UnfenceBrokerRecord().setId(1).setEpoch(100);
    clusterControl.replay(unfenceBrokerRecord);
    assertFalse(clusterControl.unfenced(0));
    assertTrue(clusterControl.unfenced(1));
}
Use of org.apache.kafka.common.metadata.RegisterBrokerRecord.BrokerEndpoint in the Apache Kafka project.
In the class ClusterControlManagerTest, the method testUnregister:
@Test
public void testUnregister() throws Exception {
    RegisterBrokerRecord brokerRecord = new RegisterBrokerRecord().
        setBrokerId(1).setBrokerEpoch(100).
        setIncarnationId(Uuid.fromString("fPZv1VBsRFmnlRvmGcOW9w")).setRack("arack");
    brokerRecord.endPoints().add(new BrokerEndpoint().
        setSecurityProtocol(SecurityProtocol.PLAINTEXT.id).setPort((short) 9092).
        setName("PLAINTEXT").setHost("example.com"));
    SnapshotRegistry snapshotRegistry = new SnapshotRegistry(new LogContext());
    ClusterControlManager clusterControl = new ClusterControlManager(new LogContext(),
        Uuid.randomUuid().toString(), new MockTime(0, 0, 0), snapshotRegistry, 1000,
        new StripedReplicaPlacer(new Random()), new MockControllerMetrics());
    clusterControl.activate();
    clusterControl.replay(brokerRecord);
    // The replayed registration is stored, still fenced, with its single PLAINTEXT listener.
    assertEquals(new BrokerRegistration(1, 100, Uuid.fromString("fPZv1VBsRFmnlRvmGcOW9w"),
        Collections.singletonMap("PLAINTEXT",
            new Endpoint("PLAINTEXT", SecurityProtocol.PLAINTEXT, "example.com", 9092)),
        Collections.emptyMap(), Optional.of("arack"), true),
        clusterControl.brokerRegistrations().get(1));
    // Replaying the UnregisterBrokerRecord removes the registration entirely.
    UnregisterBrokerRecord unregisterRecord = new UnregisterBrokerRecord().
        setBrokerId(1).setBrokerEpoch(100);
    clusterControl.replay(unregisterRecord);
    assertFalse(clusterControl.brokerRegistrations().containsKey(1));
}
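A final hedged sketch, not in the original test: since replaying a RegisterBrokerRecord records the broker's registration, replaying a fresh record after the unregistration would be expected to make broker 1 visible again. The epoch value 200 and the random incarnation id are illustrative assumptions.

// Illustrative only: re-register broker 1 with a new epoch and incarnation id.
RegisterBrokerRecord reRegistration = new RegisterBrokerRecord().
    setBrokerId(1).setBrokerEpoch(200).
    setIncarnationId(Uuid.randomUuid()).setRack("arack");
reRegistration.endPoints().add(new BrokerEndpoint().
    setSecurityProtocol(SecurityProtocol.PLAINTEXT.id).setPort((short) 9092).
    setName("PLAINTEXT").setHost("example.com"));
clusterControl.replay(reRegistration);
// Broker 1 should once again appear in the registration map.
assertTrue(clusterControl.brokerRegistrations().containsKey(1));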