Usage example of org.apache.kafka.common.metadata.RegisterBrokerRecord in the Apache Kafka project: class MetadataNodeManagerTest, method testUnfenceBrokerRecordAndFenceBrokerRecord.
@Test
public void testUnfenceBrokerRecordAndFenceBrokerRecord() {
    // A freshly registered broker starts out fenced.
    RegisterBrokerRecord registration = new RegisterBrokerRecord()
        .setBrokerId(1)
        .setBrokerEpoch(2);
    metadataNodeManager.handleMessage(registration);
    assertEquals("true",
        metadataNodeManager.getData().root()
            .directory("brokers", "1").file("isFenced").contents());

    // Replaying an unfence record flips the flag to false.
    UnfenceBrokerRecord unfence = new UnfenceBrokerRecord().setId(1).setEpoch(2);
    metadataNodeManager.handleMessage(unfence);
    assertEquals("false",
        metadataNodeManager.getData().root()
            .directory("brokers", "1").file("isFenced").contents());

    // Replaying a fence record flips it back to true.
    FenceBrokerRecord fence = new FenceBrokerRecord().setId(1).setEpoch(2);
    metadataNodeManager.handleMessage(fence);
    assertEquals("true",
        metadataNodeManager.getData().root()
            .directory("brokers", "1").file("isFenced").contents());
}
Usage example of org.apache.kafka.common.metadata.RegisterBrokerRecord in the Apache Kafka project: class MetadataNodeManagerTest, method testRegisterBrokerRecordAndUnregisterBrokerRecord.
@Test
public void testRegisterBrokerRecordAndUnregisterBrokerRecord() {
    // Registering a broker creates its metadata directory with the serialized
    // registration record; the broker starts out fenced.
    RegisterBrokerRecord registration = new RegisterBrokerRecord()
        .setBrokerId(1)
        .setBrokerEpoch(2);
    metadataNodeManager.handleMessage(registration);
    assertEquals(registration.toString(),
        metadataNodeManager.getData().root()
            .directory("brokers", "1").file("registration").contents());
    assertEquals("true",
        metadataNodeManager.getData().root()
            .directory("brokers", "1").file("isFenced").contents());

    // Unregistering removes the broker's directory entirely.
    UnregisterBrokerRecord unregistration = new UnregisterBrokerRecord().setBrokerId(1);
    metadataNodeManager.handleMessage(unregistration);
    assertFalse(metadataNodeManager.getData().root()
        .directory("brokers").children().containsKey("1"));
}
Usage example of org.apache.kafka.common.metadata.RegisterBrokerRecord in the Apache Kafka project: class MetadataRecordSerdeTest, method testParsingRecordWithGarbageAtEnd.
/**
 * Test attempting to parse an event which has one byte of trailing garbage
 * appended after the serialized record. The deserializer must reject the
 * extra byte with a MetadataParseException rather than silently ignore it.
 * (The previous Javadoc incorrectly described this as a malformed message
 * version varint test; the assertion below checks the trailing-garbage path.)
 */
@Test
public void testParsingRecordWithGarbageAtEnd() {
    MetadataRecordSerde serde = new MetadataRecordSerde();
    RegisterBrokerRecord message = new RegisterBrokerRecord().setBrokerId(1).setBrokerEpoch(2);
    ObjectSerializationCache cache = new ObjectSerializationCache();
    ApiMessageAndVersion messageAndVersion = new ApiMessageAndVersion(message, (short) 0);
    int size = serde.recordSize(messageAndVersion, cache);
    // Allocate one byte more than the record needs; the unwritten trailing
    // byte is the "garbage" that read() is expected to complain about when
    // told the payload is size + 1 bytes long.
    ByteBuffer buffer = ByteBuffer.allocate(size + 1);
    serde.write(messageAndVersion, cache, new ByteBufferAccessor(buffer));
    buffer.clear();
    assertStartsWith("Found 1 byte(s) of garbage after",
        assertThrows(MetadataParseException.class,
            () -> serde.read(new ByteBufferAccessor(buffer), size + 1)).getMessage());
}
Usage example of org.apache.kafka.common.metadata.RegisterBrokerRecord in the Apache Kafka project: class ClusterControlManagerTest, method testIterator.
/**
 * Verify that the cluster control snapshot iterator emits one batch per
 * registered broker, reflecting each broker's endpoint and fencing state.
 */
@Test
public void testIterator() throws Exception {
    MockTime time = new MockTime(0, 0, 0);
    SnapshotRegistry snapshotRegistry = new SnapshotRegistry(new LogContext());
    ClusterControlManager clusterControl = new ClusterControlManager(
        new LogContext(), Uuid.randomUuid().toString(), time, snapshotRegistry,
        1000, new StripedReplicaPlacer(new Random()), new MockControllerMetrics());
    clusterControl.activate();
    assertFalse(clusterControl.unfenced(0));
    // Register three brokers on ports 9092..9094; all start out fenced.
    for (int i = 0; i < 3; i++) {
        RegisterBrokerRecord brokerRecord = new RegisterBrokerRecord()
            .setBrokerEpoch(100).setBrokerId(i).setRack(null);
        // FIX: the original wrote "(short) 9092 + i" — the cast binds tighter
        // than '+', so only the literal was cast and the argument expression
        // was an int. Parenthesizing the sum makes the intended per-broker
        // port explicit and matches the (short) 9092/9093/9094 values asserted
        // below; 9092 + i always fits in a short here.
        brokerRecord.endPoints().add(new BrokerEndpoint()
            .setSecurityProtocol(SecurityProtocol.PLAINTEXT.id)
            .setPort((short) (9092 + i))
            .setName("PLAINTEXT")
            .setHost("example.com"));
        clusterControl.replay(brokerRecord);
    }
    // Unfence only brokers 0 and 1; broker 2 stays fenced.
    for (int i = 0; i < 2; i++) {
        UnfenceBrokerRecord unfenceBrokerRecord = new UnfenceBrokerRecord().setId(i).setEpoch(100);
        clusterControl.replay(unfenceBrokerRecord);
    }
    RecordTestUtils.assertBatchIteratorContains(Arrays.asList(
        Arrays.asList(new ApiMessageAndVersion(new RegisterBrokerRecord()
            .setBrokerEpoch(100).setBrokerId(0).setRack(null)
            .setEndPoints(new BrokerEndpointCollection(Collections.singleton(
                new BrokerEndpoint().setSecurityProtocol(SecurityProtocol.PLAINTEXT.id)
                    .setPort((short) 9092).setName("PLAINTEXT").setHost("example.com"))
                .iterator()))
            .setFenced(false), (short) 0)),
        Arrays.asList(new ApiMessageAndVersion(new RegisterBrokerRecord()
            .setBrokerEpoch(100).setBrokerId(1).setRack(null)
            .setEndPoints(new BrokerEndpointCollection(Collections.singleton(
                new BrokerEndpoint().setSecurityProtocol(SecurityProtocol.PLAINTEXT.id)
                    .setPort((short) 9093).setName("PLAINTEXT").setHost("example.com"))
                .iterator()))
            .setFenced(false), (short) 0)),
        Arrays.asList(new ApiMessageAndVersion(new RegisterBrokerRecord()
            .setBrokerEpoch(100).setBrokerId(2).setRack(null)
            .setEndPoints(new BrokerEndpointCollection(Collections.singleton(
                new BrokerEndpoint().setSecurityProtocol(SecurityProtocol.PLAINTEXT.id)
                    .setPort((short) 9094).setName("PLAINTEXT").setHost("example.com"))
                .iterator()))
            .setFenced(true), (short) 0))),
        clusterControl.iterator(Long.MAX_VALUE));
}
Usage example of org.apache.kafka.common.metadata.RegisterBrokerRecord in the Apache Kafka project: class ClusterControlManagerTest, method testPlaceReplicas.
@ParameterizedTest
@ValueSource(ints = { 3, 10 })
public void testPlaceReplicas(int numUsableBrokers) throws Exception {
    MockTime time = new MockTime(0, 0, 0);
    SnapshotRegistry snapshotRegistry = new SnapshotRegistry(new LogContext());
    MockRandom random = new MockRandom();
    ClusterControlManager clusterControl = new ClusterControlManager(
        new LogContext(), Uuid.randomUuid().toString(), time, snapshotRegistry,
        1000, new StripedReplicaPlacer(random), new MockControllerMetrics());
    clusterControl.activate();

    // Register, unfence, and heartbeat each broker so that all of them are
    // usable candidates for replica placement.
    for (int brokerId = 0; brokerId < numUsableBrokers; brokerId++) {
        RegisterBrokerRecord registration = new RegisterBrokerRecord()
            .setBrokerEpoch(100)
            .setBrokerId(brokerId);
        registration.endPoints().add(new BrokerEndpoint()
            .setSecurityProtocol(SecurityProtocol.PLAINTEXT.id)
            .setPort((short) 9092)
            .setName("PLAINTEXT")
            .setHost("example.com"));
        clusterControl.replay(registration);
        clusterControl.replay(new UnfenceBrokerRecord().setId(brokerId).setEpoch(100));
        clusterControl.heartbeatManager().touch(brokerId, false, 0);
    }
    for (int brokerId = 0; brokerId < numUsableBrokers; brokerId++) {
        assertTrue(clusterControl.unfenced(brokerId),
            String.format("broker %d was not unfenced.", brokerId));
    }

    // Every placement attempt must yield 3 distinct replicas, all within
    // the range of registered broker ids.
    for (int attempt = 0; attempt < 100; attempt++) {
        List<List<Integer>> placements = clusterControl.placeReplicas(0, 1, (short) 3);
        HashSet<Integer> seen = new HashSet<>();
        for (Integer replica : placements.get(0)) {
            assertTrue(replica >= 0);
            assertTrue(replica < numUsableBrokers);
            assertTrue(seen.add(replica));
        }
    }
}
Aggregations