Use of org.apache.kafka.server.common.ApiMessageAndVersion in project kafka by apache.
The class ProducerIdControlManagerTest, method testSnapshotIterator.
@Test
public void testSnapshotIterator() {
    ProducerIdsBlock range = null;
    for (int i = 0; i < 100; i++) {
        range = generateProducerIds(producerIdControlManager, i % 4, 100);
    }

    Iterator<List<ApiMessageAndVersion>> snapshotIterator = producerIdControlManager.iterator(Long.MAX_VALUE);
    assertTrue(snapshotIterator.hasNext());
    List<ApiMessageAndVersion> batch = snapshotIterator.next();
    assertEquals(1, batch.size(), "Producer IDs record batch should only contain a single record");
    assertEquals(range.firstProducerId() + range.size(), ((ProducerIdsRecord) batch.get(0).message()).nextProducerId());
    assertFalse(snapshotIterator.hasNext(), "Producer IDs iterator should only contain a single batch");

    ProducerIdControlManager newProducerIdManager = new ProducerIdControlManager(clusterControl, snapshotRegistry);
    snapshotIterator = producerIdControlManager.iterator(Long.MAX_VALUE);
    while (snapshotIterator.hasNext()) {
        snapshotIterator.next().forEach(message -> newProducerIdManager.replay((ProducerIdsRecord) message.message()));
    }

    // Verify that after reloading state from this "snapshot", we don't produce any overlapping IDs
    long lastProducerID = range.firstProducerId() + range.size() - 1;
    range = generateProducerIds(producerIdControlManager, 1, 100);
    assertTrue(range.firstProducerId() > lastProducerID);
}
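The generateProducerIds helper is not shown in this snippet. A minimal sketch of what it could look like, assuming the manager exposes a generateNextProducerId(brokerId, brokerEpoch) method that returns a ControllerResult whose records must be replayed to commit the allocation; the method name and result shape are assumptions, not confirmed by the snippet above.

    // Hypothetical helper: allocate the next block of producer IDs and replay the
    // resulting records so the manager's in-memory state reflects the allocation.
    // generateNextProducerId and ControllerResult are assumed API shapes.
    static ProducerIdsBlock generateProducerIds(
            ProducerIdControlManager producerIdControlManager, int brokerId, long brokerEpoch) {
        ControllerResult<ProducerIdsBlock> result =
            producerIdControlManager.generateNextProducerId(brokerId, brokerEpoch);
        result.records().forEach(message ->
            producerIdControlManager.replay((ProducerIdsRecord) message.message()));
        return result.response();
    }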
Use of org.apache.kafka.server.common.ApiMessageAndVersion in project kafka by apache.
The class ClusterControlManagerTest, method testIterator.
@Test
public void testIterator() throws Exception {
    MockTime time = new MockTime(0, 0, 0);
    SnapshotRegistry snapshotRegistry = new SnapshotRegistry(new LogContext());
    ClusterControlManager clusterControl = new ClusterControlManager(
        new LogContext(), Uuid.randomUuid().toString(), time, snapshotRegistry,
        1000, new StripedReplicaPlacer(new Random()), new MockControllerMetrics());
    clusterControl.activate();
    assertFalse(clusterControl.unfenced(0));

    // Register three brokers on ports 9092, 9093, and 9094.
    for (int i = 0; i < 3; i++) {
        RegisterBrokerRecord brokerRecord = new RegisterBrokerRecord().
            setBrokerEpoch(100).setBrokerId(i).setRack(null);
        brokerRecord.endPoints().add(new BrokerEndpoint().
            setSecurityProtocol(SecurityProtocol.PLAINTEXT.id).
            setPort((short) (9092 + i)).
            setName("PLAINTEXT").
            setHost("example.com"));
        clusterControl.replay(brokerRecord);
    }
    // Unfence the first two brokers; broker 2 stays fenced.
    for (int i = 0; i < 2; i++) {
        UnfenceBrokerRecord unfenceBrokerRecord = new UnfenceBrokerRecord().
            setId(i).setEpoch(100);
        clusterControl.replay(unfenceBrokerRecord);
    }

    RecordTestUtils.assertBatchIteratorContains(Arrays.asList(
        Arrays.asList(new ApiMessageAndVersion(new RegisterBrokerRecord().
            setBrokerEpoch(100).setBrokerId(0).setRack(null).
            setEndPoints(new BrokerEndpointCollection(Collections.singleton(
                new BrokerEndpoint().setSecurityProtocol(SecurityProtocol.PLAINTEXT.id).
                    setPort((short) 9092).setName("PLAINTEXT").setHost("example.com")).iterator())).
            setFenced(false), (short) 0)),
        Arrays.asList(new ApiMessageAndVersion(new RegisterBrokerRecord().
            setBrokerEpoch(100).setBrokerId(1).setRack(null).
            setEndPoints(new BrokerEndpointCollection(Collections.singleton(
                new BrokerEndpoint().setSecurityProtocol(SecurityProtocol.PLAINTEXT.id).
                    setPort((short) 9093).setName("PLAINTEXT").setHost("example.com")).iterator())).
            setFenced(false), (short) 0)),
        Arrays.asList(new ApiMessageAndVersion(new RegisterBrokerRecord().
            setBrokerEpoch(100).setBrokerId(2).setRack(null).
            setEndPoints(new BrokerEndpointCollection(Collections.singleton(
                new BrokerEndpoint().setSecurityProtocol(SecurityProtocol.PLAINTEXT.id).
                    setPort((short) 9094).setName("PLAINTEXT").setHost("example.com")).iterator())).
            setFenced(true), (short) 0))),
        clusterControl.iterator(Long.MAX_VALUE));
}
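RecordTestUtils.assertBatchIteratorContains is a test utility whose body is not shown here. A simplified sketch of what such an assertion could look like, assuming it just drains the iterator and compares the collected batches against the expected ones; the real utility may normalize record ordering before comparing.

    // Simplified sketch: drain the snapshot iterator and compare batch-for-batch.
    // Assumes the iterator yields batches in the expected order.
    static void assertBatchIteratorContains(List<List<ApiMessageAndVersion>> expected,
            Iterator<List<ApiMessageAndVersion>> iterator) {
        List<List<ApiMessageAndVersion>> actual = new ArrayList<>();
        while (iterator.hasNext()) {
            actual.add(new ArrayList<>(iterator.next()));
        }
        assertEquals(expected, actual);
    }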
Use of org.apache.kafka.server.common.ApiMessageAndVersion in project kafka by apache.
The class BrokerRegistrationTest, method testRoundTrip.
private void testRoundTrip(BrokerRegistration registration) {
    ApiMessageAndVersion messageAndVersion = registration.toRecord();
    BrokerRegistration registration2 = BrokerRegistration.fromRecord(
        (RegisterBrokerRecord) messageAndVersion.message());
    assertEquals(registration, registration2);
    ApiMessageAndVersion messageAndVersion2 = registration2.toRecord();
    assertEquals(messageAndVersion, messageAndVersion2);
}
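A caller of this helper would pass a fully-populated BrokerRegistration. A hypothetical invocation, assuming a constructor of the shape (id, epoch, incarnation ID, listeners, supported features, rack, fenced); this signature is an assumption about the class, not confirmed by the snippet.

    // Hypothetical invocation; the BrokerRegistration constructor shape is assumed.
    testRoundTrip(new BrokerRegistration(
        0,                          // broker id
        100L,                       // broker epoch
        Uuid.randomUuid(),          // incarnation id
        Collections.emptyList(),    // listener endpoints
        Collections.emptyMap(),     // supported features
        Optional.empty(),           // rack
        false));                    // fenced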
Use of org.apache.kafka.server.common.ApiMessageAndVersion in project kafka by apache.
The class MetadataRecordSerdeTest, method testParsingRecordWithGarbageAtEnd.
/**
 * Test attempting to parse a record that has one byte of trailing garbage
 * after the serialized message.
 */
@Test
public void testParsingRecordWithGarbageAtEnd() {
    MetadataRecordSerde serde = new MetadataRecordSerde();
    RegisterBrokerRecord message = new RegisterBrokerRecord().setBrokerId(1).setBrokerEpoch(2);
    ObjectSerializationCache cache = new ObjectSerializationCache();
    ApiMessageAndVersion messageAndVersion = new ApiMessageAndVersion(message, (short) 0);
    int size = serde.recordSize(messageAndVersion, cache);

    // Allocate one extra byte so the buffer ends with a byte the parser cannot account for.
    ByteBuffer buffer = ByteBuffer.allocate(size + 1);
    serde.write(messageAndVersion, cache, new ByteBufferAccessor(buffer));
    buffer.clear();
    assertStartsWith("Found 1 byte(s) of garbage after",
        assertThrows(MetadataParseException.class,
            () -> serde.read(new ByteBufferAccessor(buffer), size + 1)).getMessage());
}
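For contrast, the happy path uses the same calls without the extra byte. A minimal sketch of a successful round trip, using only the methods already exercised by the test above (recordSize, write, read):

    // Sketch of a successful round trip through MetadataRecordSerde.
    // buffer.flip() makes the freshly written bytes readable.
    MetadataRecordSerde serde = new MetadataRecordSerde();
    ObjectSerializationCache cache = new ObjectSerializationCache();
    ApiMessageAndVersion in = new ApiMessageAndVersion(
        new RegisterBrokerRecord().setBrokerId(1).setBrokerEpoch(2), (short) 0);
    int size = serde.recordSize(in, cache);
    ByteBuffer buffer = ByteBuffer.allocate(size);
    serde.write(in, cache, new ByteBufferAccessor(buffer));
    buffer.flip();
    ApiMessageAndVersion out = serde.read(new ByteBufferAccessor(buffer), size);
    assertEquals(in, out);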
Use of org.apache.kafka.server.common.ApiMessageAndVersion in project kafka by apache.
The class KafkaClusterTestKit, method close.
@Override
public void close() throws Exception {
    List<Entry<String, Future<?>>> futureEntries = new ArrayList<>();
    try {
        controllerQuorumVotersFutureManager.close();

        // Shut down the brokers first, then the controllers, then the raft managers,
        // waiting for each group to finish before starting the next.
        for (Entry<Integer, BrokerServer> entry : brokers.entrySet()) {
            int brokerId = entry.getKey();
            BrokerServer broker = entry.getValue();
            futureEntries.add(new SimpleImmutableEntry<>("broker" + brokerId,
                executorService.submit(broker::shutdown)));
        }
        waitForAllFutures(futureEntries);
        futureEntries.clear();

        for (Entry<Integer, ControllerServer> entry : controllers.entrySet()) {
            int controllerId = entry.getKey();
            ControllerServer controller = entry.getValue();
            futureEntries.add(new SimpleImmutableEntry<>("controller" + controllerId,
                executorService.submit(controller::shutdown)));
        }
        waitForAllFutures(futureEntries);
        futureEntries.clear();

        for (Entry<Integer, KafkaRaftManager<ApiMessageAndVersion>> entry : raftManagers.entrySet()) {
            int raftManagerId = entry.getKey();
            KafkaRaftManager<ApiMessageAndVersion> raftManager = entry.getValue();
            futureEntries.add(new SimpleImmutableEntry<>("raftManager" + raftManagerId,
                executorService.submit(raftManager::shutdown)));
        }
        waitForAllFutures(futureEntries);
        futureEntries.clear();

        Utils.delete(baseDirectory);
    } catch (Exception e) {
        // On failure, cancel anything still pending before rethrowing.
        for (Entry<String, Future<?>> entry : futureEntries) {
            entry.getValue().cancel(true);
        }
        throw e;
    } finally {
        executorService.shutdownNow();
        executorService.awaitTermination(5, TimeUnit.MINUTES);
    }
}
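The waitForAllFutures helper is not shown in this snippet. A minimal sketch of what it could look like, assuming it simply blocks on each future in order and lets the first failure propagate to close(), whose catch block then cancels whatever is still pending:

    // Hypothetical helper: block on each named future so one group of shutdowns
    // completes before the next group begins. Future.get() rethrows failures.
    private static void waitForAllFutures(List<Entry<String, Future<?>>> futureEntries)
            throws Exception {
        for (Entry<String, Future<?>> entry : futureEntries) {
            entry.getValue().get();
        }
    }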