Use of org.apache.kafka.common.message.CreateTopicsRequestData.CreatableReplicaAssignmentCollection in project kafka by apache.
The class QuorumControllerTest, method testMissingInMemorySnapshot.
@Test
public void testMissingInMemorySnapshot() throws Exception {
    int numBrokers = 3;
    int numPartitions = 3;
    String topicName = "topic-name";
    try (LocalLogManagerTestEnv logEnv = new LocalLogManagerTestEnv(1, Optional.empty());
         QuorumControllerTestEnv controlEnv = new QuorumControllerTestEnv(logEnv, b -> b.setConfigDefs(CONFIGS))) {
        QuorumController controller = controlEnv.activeController();
        Map<Integer, Long> brokerEpochs = registerBrokers(controller, numBrokers);
        // Build a manual replica assignment for each partition: all on brokers 0, 1 and 2
        List<CreatableReplicaAssignment> partitions = IntStream.range(0, numPartitions)
            .mapToObj(partitionIndex -> new CreatableReplicaAssignment()
                .setPartitionIndex(partitionIndex)
                .setBrokerIds(Arrays.asList(0, 1, 2)))
            .collect(Collectors.toList());
        Uuid topicId = controller.createTopics(
            new CreateTopicsRequestData().setTopics(
                new CreatableTopicCollection(Collections.singleton(
                    new CreatableTopic()
                        .setName(topicName)
                        .setNumPartitions(-1)
                        .setReplicationFactor((short) -1)
                        .setAssignments(new CreatableReplicaAssignmentCollection(partitions.iterator()))
                ).iterator()))
        ).get().topics().find(topicName).topicId();
        // Build an AlterIsr entry for each partition, shrinking the ISR to brokers 0 and 1
        List<AlterIsrRequestData.PartitionData> alterIsrs = IntStream.range(0, numPartitions)
            .mapToObj(partitionIndex -> {
                PartitionRegistration partitionRegistration =
                    controller.replicationControl().getPartition(topicId, partitionIndex);
                return new AlterIsrRequestData.PartitionData()
                    .setPartitionIndex(partitionIndex)
                    .setLeaderEpoch(partitionRegistration.leaderEpoch)
                    .setCurrentIsrVersion(partitionRegistration.partitionEpoch)
                    .setNewIsr(Arrays.asList(0, 1));
            })
            .collect(Collectors.toList());
        AlterIsrRequestData.TopicData topicData = new AlterIsrRequestData.TopicData().setName(topicName);
        topicData.partitions().addAll(alterIsrs);
        int leaderId = 0;
        AlterIsrRequestData alterIsrRequest = new AlterIsrRequestData()
            .setBrokerId(leaderId)
            .setBrokerEpoch(brokerEpochs.get(leaderId));
        alterIsrRequest.topics().add(topicData);
        logEnv.logManagers().get(0).resignAfterNonAtomicCommit();
        int oldClaimEpoch = controller.curClaimEpoch();
        assertThrows(ExecutionException.class, () -> controller.alterIsr(alterIsrRequest).get());
        // Wait for the controller to become active again
        assertSame(controller, controlEnv.activeController());
        assertTrue(oldClaimEpoch < controller.curClaimEpoch(),
            String.format("oldClaimEpoch = %s, newClaimEpoch = %s", oldClaimEpoch, controller.curClaimEpoch()));
        // Since the alterIsr only partially failed, we expect some partitions
        // to still have broker 2 in the ISR.
        int partitionsWithReplica2 = Utils.toList(
            controller.replicationControl().brokersToIsrs().partitionsWithBrokerInIsr(2)).size();
        int partitionsWithReplica0 = Utils.toList(
            controller.replicationControl().brokersToIsrs().partitionsWithBrokerInIsr(0)).size();
        assertEquals(numPartitions, partitionsWithReplica0);
        assertNotEquals(0, partitionsWithReplica2);
        assertTrue(partitionsWithReplica0 > partitionsWithReplica2,
            String.format("partitionsWithReplica0 = %s, partitionsWithReplica2 = %s",
                partitionsWithReplica0, partitionsWithReplica2));
    }
}
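The pattern above is the core use of CreatableReplicaAssignmentCollection: build one CreatableReplicaAssignment per partition, then hand an iterator over them to the collection constructor. Below is a minimal sketch of that pattern in isolation, outside the controller test harness; the class name, topic name, and broker ids are illustrative, and only the generated message classes from kafka-clients are assumed.

import java.util.Arrays;
import java.util.Collections;

import org.apache.kafka.common.message.CreateTopicsRequestData;
import org.apache.kafka.common.message.CreateTopicsRequestData.CreatableReplicaAssignment;
import org.apache.kafka.common.message.CreateTopicsRequestData.CreatableReplicaAssignmentCollection;
import org.apache.kafka.common.message.CreateTopicsRequestData.CreatableTopic;
import org.apache.kafka.common.message.CreateTopicsRequestData.CreatableTopicCollection;

public class ManualAssignmentSketch {

    public static CreateTopicsRequestData newRequest() {
        // One entry per partition; each entry lists the brokers hosting replicas of it.
        CreatableReplicaAssignmentCollection assignments = new CreatableReplicaAssignmentCollection(
            Arrays.asList(
                new CreatableReplicaAssignment().setPartitionIndex(0).setBrokerIds(Arrays.asList(0, 1)),
                new CreatableReplicaAssignment().setPartitionIndex(1).setBrokerIds(Arrays.asList(1, 0))
            ).iterator());
        // With a manual assignment, numPartitions and replicationFactor carry the -1
        // sentinel, exactly as in the tests above.
        return new CreateTopicsRequestData().setTopics(
            new CreatableTopicCollection(Collections.singleton(
                new CreatableTopic()
                    .setName("example-topic")
                    .setNumPartitions(-1)
                    .setReplicationFactor((short) -1)
                    .setAssignments(assignments)
            ).iterator()));
    }

    public static void main(String[] args) {
        // Generated message classes have a readable toString().
        System.out.println(newRequest());
    }
}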
Use of org.apache.kafka.common.message.CreateTopicsRequestData.CreatableReplicaAssignmentCollection in project kafka by apache.
The class InternalTopicManagerTest, method shouldThrowInformativeExceptionForOlderBrokers.
@Test
public void shouldThrowInformativeExceptionForOlderBrokers() {
    final AdminClient admin = new MockAdminClient() {
        @Override
        public CreateTopicsResult createTopics(final Collection<NewTopic> newTopics, final CreateTopicsOptions options) {
            final CreatableTopic topicToBeCreated = new CreatableTopic();
            topicToBeCreated.setAssignments(new CreatableReplicaAssignmentCollection());
            topicToBeCreated.setNumPartitions(1);
            // Set a replication factor that older brokers do not support
            topicToBeCreated.setReplicationFactor((short) -1);
            final CreatableTopicCollection topicsToBeCreated = new CreatableTopicCollection();
            topicsToBeCreated.add(topicToBeCreated);
            try {
                new CreateTopicsRequest.Builder(
                    new CreateTopicsRequestData()
                        .setTopics(topicsToBeCreated)
                        .setTimeoutMs(0)
                        .setValidateOnly(options.shouldValidateOnly())
                // pass in an old, unsupported request version for old brokers
                ).build((short) 3);
                throw new IllegalStateException("Building CreateTopicsRequest should have thrown.");
            } catch (final UnsupportedVersionException expected) {
                final KafkaFutureImpl<TopicMetadataAndConfig> future = new KafkaFutureImpl<>();
                future.completeExceptionally(expected);
                // CreateTopicsResult's constructor is not public, so subclass to instantiate
                return new CreateTopicsResult(Collections.singletonMap(topic1, future)) { };
            }
        }
    };
    final StreamsConfig streamsConfig = new StreamsConfig(config);
    final InternalTopicManager topicManager = new InternalTopicManager(time, admin, streamsConfig);
    final InternalTopicConfig topicConfig = new RepartitionTopicConfig(topic1, Collections.emptyMap());
    topicConfig.setNumberOfPartitions(1);
    final StreamsException exception = assertThrows(
        StreamsException.class,
        () -> topicManager.makeReady(Collections.singletonMap(topic1, topicConfig)));
    assertThat(
        exception.getMessage(),
        equalTo("Could not create topic " + topic1 + ", because brokers don't support configuration replication.factor=-1."
            + " You can change the replication.factor config or upgrade your brokers to version 2.4 or newer to avoid this error."));
}
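The forced failure above relies on version gating in the request builder: the replicationFactor = -1 sentinel (KIP-464, "use the broker default") only serializes at CreateTopics version 4 and above, which matches the "2.4 or newer" wording in the asserted message. Here is a small sketch of that boundary, under the assumption that version 4 is exactly where the sentinel became legal:

import java.util.Collections;

import org.apache.kafka.common.errors.UnsupportedVersionException;
import org.apache.kafka.common.message.CreateTopicsRequestData;
import org.apache.kafka.common.message.CreateTopicsRequestData.CreatableTopic;
import org.apache.kafka.common.message.CreateTopicsRequestData.CreatableTopicCollection;
import org.apache.kafka.common.requests.CreateTopicsRequest;

public class VersionGateSketch {
    public static void main(String[] args) {
        CreateTopicsRequest.Builder builder = new CreateTopicsRequest.Builder(
            new CreateTopicsRequestData().setTopics(
                new CreatableTopicCollection(Collections.singleton(
                    new CreatableTopic()
                        .setName("example-topic")
                        .setNumPartitions(-1)
                        // broker-default sentinel, not representable before version 4
                        .setReplicationFactor((short) -1)
                ).iterator())));
        try {
            builder.build((short) 3);
            System.out.println("unexpected: version 3 accepted the sentinel");
        } catch (UnsupportedVersionException e) {
            System.out.println("version 3 rejected replication.factor=-1: " + e.getMessage());
        }
        // Version 4 and above accept the defaults sentinel.
        builder.build((short) 4);
    }
}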
Use of org.apache.kafka.common.message.CreateTopicsRequestData.CreatableReplicaAssignmentCollection in project kafka by apache.
The class QuorumControllerTest, method testSnapshotConfiguration.
@Test
public void testSnapshotConfiguration() throws Throwable {
    final int numBrokers = 4;
    final int maxNewRecordBytes = 4;
    Map<Integer, Long> brokerEpochs = new HashMap<>();
    Uuid fooId;
    try (LocalLogManagerTestEnv logEnv = new LocalLogManagerTestEnv(3, Optional.empty())) {
        try (QuorumControllerTestEnv controlEnv = new QuorumControllerTestEnv(logEnv, builder -> {
            builder.setConfigDefs(CONFIGS).setSnapshotMaxNewRecordBytes(maxNewRecordBytes);
        })) {
            QuorumController active = controlEnv.activeController();
            // Register the brokers
            for (int i = 0; i < numBrokers; i++) {
                BrokerRegistrationReply reply = active.registerBroker(
                    new BrokerRegistrationRequestData()
                        .setBrokerId(i)
                        .setRack(null)
                        .setClusterId(active.clusterId())
                        .setIncarnationId(Uuid.fromString("kxAT73dKQsitIedpiPtwB" + i))
                        .setListeners(new ListenerCollection(Arrays.asList(
                            new Listener().setName("PLAINTEXT").setHost("localhost").setPort(9092 + i)
                        ).iterator()))).get();
                brokerEpochs.put(i, reply.epoch());
            }
            // Unfence all brokers except the last one
            for (int i = 0; i < numBrokers - 1; i++) {
                assertEquals(new BrokerHeartbeatReply(true, false, false, false),
                    active.processBrokerHeartbeat(new BrokerHeartbeatRequestData()
                        .setWantFence(false)
                        .setBrokerEpoch(brokerEpochs.get(i))
                        .setBrokerId(i)
                        .setCurrentMetadataOffset(100000L)).get());
            }
            // Create the "foo" topic with a manual two-partition assignment
            CreateTopicsResponseData fooData = active.createTopics(
                new CreateTopicsRequestData().setTopics(
                    new CreatableTopicCollection(Collections.singleton(
                        new CreatableTopic()
                            .setName("foo")
                            .setNumPartitions(-1)
                            .setReplicationFactor((short) -1)
                            .setAssignments(new CreatableReplicaAssignmentCollection(Arrays.asList(
                                new CreatableReplicaAssignment().setPartitionIndex(0).setBrokerIds(Arrays.asList(0, 1, 2)),
                                new CreatableReplicaAssignment().setPartitionIndex(1).setBrokerIds(Arrays.asList(1, 2, 0))
                            ).iterator()))
                    ).iterator()))).get();
            fooId = fooData.topics().find("foo").topicId();
            active.allocateProducerIds(
                new AllocateProducerIdsRequestData().setBrokerId(0).setBrokerEpoch(brokerEpochs.get(0))).get();
            // With maxNewRecordBytes = 4, the controller should generate a snapshot on its own
            SnapshotReader<ApiMessageAndVersion> snapshot = createSnapshotReader(logEnv.waitForLatestSnapshot());
            checkSnapshotSubcontent(expectedSnapshotContent(fooId, brokerEpochs), snapshot);
        }
    }
}
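The registration loop here is the same one the first snippet hides behind its registerBrokers helper. The following is a plausible shape for that helper, reconstructed from the inline loop above and using the same imports as these tests; the real method in QuorumControllerTest may differ in its details.

// Sketch of a registerBrokers helper in the style of these tests.
private Map<Integer, Long> registerBrokers(QuorumController controller, int numBrokers) throws Exception {
    Map<Integer, Long> brokerEpochs = new HashMap<>();
    for (int brokerId = 0; brokerId < numBrokers; brokerId++) {
        BrokerRegistrationReply reply = controller.registerBroker(
            new BrokerRegistrationRequestData()
                .setBrokerId(brokerId)
                .setRack(null)
                .setClusterId(controller.clusterId())
                .setIncarnationId(Uuid.fromString("kxAT73dKQsitIedpiPtwB" + brokerId))
                .setListeners(new ListenerCollection(Arrays.asList(
                    new Listener().setName("PLAINTEXT").setHost("localhost").setPort(9092 + brokerId)
                ).iterator()))).get();
        brokerEpochs.put(brokerId, reply.epoch());
    }
    return brokerEpochs;
}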
Use of org.apache.kafka.common.message.CreateTopicsRequestData.CreatableReplicaAssignmentCollection in project kafka by apache.
The class QuorumControllerTest, method testSnapshotSaveAndLoad.
@Test
public void testSnapshotSaveAndLoad() throws Throwable {
    final int numBrokers = 4;
    Map<Integer, Long> brokerEpochs = new HashMap<>();
    RawSnapshotReader reader = null;
    Uuid fooId;
    try (LocalLogManagerTestEnv logEnv = new LocalLogManagerTestEnv(3, Optional.empty())) {
        try (QuorumControllerTestEnv controlEnv = new QuorumControllerTestEnv(logEnv, b -> b.setConfigDefs(CONFIGS))) {
            QuorumController active = controlEnv.activeController();
            // Register the brokers
            for (int i = 0; i < numBrokers; i++) {
                BrokerRegistrationReply reply = active.registerBroker(
                    new BrokerRegistrationRequestData()
                        .setBrokerId(i)
                        .setRack(null)
                        .setClusterId(active.clusterId())
                        .setIncarnationId(Uuid.fromString("kxAT73dKQsitIedpiPtwB" + i))
                        .setListeners(new ListenerCollection(Arrays.asList(
                            new Listener().setName("PLAINTEXT").setHost("localhost").setPort(9092 + i)
                        ).iterator()))).get();
                brokerEpochs.put(i, reply.epoch());
            }
            // Unfence all brokers except the last one
            for (int i = 0; i < numBrokers - 1; i++) {
                assertEquals(new BrokerHeartbeatReply(true, false, false, false),
                    active.processBrokerHeartbeat(new BrokerHeartbeatRequestData()
                        .setWantFence(false)
                        .setBrokerEpoch(brokerEpochs.get(i))
                        .setBrokerId(i)
                        .setCurrentMetadataOffset(100000L)).get());
            }
            // Create the "foo" topic with a manual two-partition assignment
            CreateTopicsResponseData fooData = active.createTopics(
                new CreateTopicsRequestData().setTopics(
                    new CreatableTopicCollection(Collections.singleton(
                        new CreatableTopic()
                            .setName("foo")
                            .setNumPartitions(-1)
                            .setReplicationFactor((short) -1)
                            .setAssignments(new CreatableReplicaAssignmentCollection(Arrays.asList(
                                new CreatableReplicaAssignment().setPartitionIndex(0).setBrokerIds(Arrays.asList(0, 1, 2)),
                                new CreatableReplicaAssignment().setPartitionIndex(1).setBrokerIds(Arrays.asList(1, 2, 0))
                            ).iterator()))
                    ).iterator()))).get();
            fooId = fooData.topics().find("foo").topicId();
            active.allocateProducerIds(
                new AllocateProducerIdsRequestData().setBrokerId(0).setBrokerEpoch(brokerEpochs.get(0))).get();
            // Explicitly write a snapshot and verify its contents
            long snapshotLogOffset = active.beginWritingSnapshot().get();
            reader = logEnv.waitForSnapshot(snapshotLogOffset);
            SnapshotReader<ApiMessageAndVersion> snapshot = createSnapshotReader(reader);
            assertEquals(snapshotLogOffset, snapshot.lastContainedLogOffset());
            checkSnapshotContent(expectedSnapshotContent(fooId, brokerEpochs), snapshot);
        }
    }
    // Reload the cluster from the snapshot and verify that a new snapshot matches
    try (LocalLogManagerTestEnv logEnv = new LocalLogManagerTestEnv(3, Optional.of(reader))) {
        try (QuorumControllerTestEnv controlEnv = new QuorumControllerTestEnv(logEnv, b -> b.setConfigDefs(CONFIGS))) {
            QuorumController active = controlEnv.activeController();
            long snapshotLogOffset = active.beginWritingSnapshot().get();
            SnapshotReader<ApiMessageAndVersion> snapshot =
                createSnapshotReader(logEnv.waitForSnapshot(snapshotLogOffset));
            assertEquals(snapshotLogOffset, snapshot.lastContainedLogOffset());
            checkSnapshotContent(expectedSnapshotContent(fooId, brokerEpochs), snapshot);
        }
    }
}
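Reading entries back out of a CreatableReplicaAssignmentCollection works the same way as the topics().find("foo") lookup above: the generated collections are ImplicitLinkedHashCollection subclasses keyed on the element's map key, which for assignments is partitionIndex. A short sketch of that lookup, assuming the keyed find(prototype) behavior common to these generated collections:

// Build a two-partition assignment, then look one entry up by its key.
CreatableReplicaAssignmentCollection assignments = new CreatableReplicaAssignmentCollection(
    Arrays.asList(
        new CreatableReplicaAssignment().setPartitionIndex(0).setBrokerIds(Arrays.asList(0, 1, 2)),
        new CreatableReplicaAssignment().setPartitionIndex(1).setBrokerIds(Arrays.asList(1, 2, 0))
    ).iterator());
// find() matches on the map key (partitionIndex), so a prototype with only the key set suffices.
CreatableReplicaAssignment partition1 = assignments.find(
    new CreatableReplicaAssignment().setPartitionIndex(1));
System.out.println(partition1.brokerIds());  // [1, 2, 0]
// Iteration preserves insertion order.
for (CreatableReplicaAssignment assignment : assignments) {
    System.out.println(assignment.partitionIndex() + " -> " + assignment.brokerIds());
}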
Use of org.apache.kafka.common.message.CreateTopicsRequestData.CreatableReplicaAssignmentCollection in project kafka by apache.
The class QuorumControllerTest, method testSnapshotOnlyAfterConfiguredMinBytes.
@Test
public void testSnapshotOnlyAfterConfiguredMinBytes() throws Throwable {
    final int numBrokers = 4;
    final int maxNewRecordBytes = 1000;
    Map<Integer, Long> brokerEpochs = new HashMap<>();
    try (LocalLogManagerTestEnv logEnv = new LocalLogManagerTestEnv(3, Optional.empty())) {
        try (QuorumControllerTestEnv controlEnv = new QuorumControllerTestEnv(logEnv,
                builder -> builder.setConfigDefs(CONFIGS).setSnapshotMaxNewRecordBytes(maxNewRecordBytes))) {
            QuorumController active = controlEnv.activeController();
            // Register and immediately unfence every broker
            for (int i = 0; i < numBrokers; i++) {
                BrokerRegistrationReply reply = active.registerBroker(
                    new BrokerRegistrationRequestData()
                        .setBrokerId(i)
                        .setRack(null)
                        .setClusterId(active.clusterId())
                        .setIncarnationId(Uuid.fromString("kxAT73dKQsitIedpiPtwB" + i))
                        .setListeners(new ListenerCollection(Arrays.asList(
                            new Listener().setName("PLAINTEXT").setHost("localhost").setPort(9092 + i)
                        ).iterator()))).get();
                brokerEpochs.put(i, reply.epoch());
                assertEquals(new BrokerHeartbeatReply(true, false, false, false),
                    active.processBrokerHeartbeat(new BrokerHeartbeatRequestData()
                        .setWantFence(false)
                        .setBrokerEpoch(brokerEpochs.get(i))
                        .setBrokerId(i)
                        .setCurrentMetadataOffset(100000L)).get());
            }
            assertTrue(logEnv.appendedBytes() < maxNewRecordBytes,
                String.format("%s appended bytes is not less than %s max new record bytes",
                    logEnv.appendedBytes(), maxNewRecordBytes));
            // Keep creating topics until we reach the max-new-record-bytes limit
            int counter = 0;
            while (logEnv.appendedBytes() < maxNewRecordBytes) {
                counter += 1;
                String topicName = String.format("foo-%s", counter);
                active.createTopics(
                    new CreateTopicsRequestData().setTopics(
                        new CreatableTopicCollection(Collections.singleton(
                            new CreatableTopic()
                                .setName(topicName)
                                .setNumPartitions(-1)
                                .setReplicationFactor((short) -1)
                                .setAssignments(new CreatableReplicaAssignmentCollection(Arrays.asList(
                                    new CreatableReplicaAssignment().setPartitionIndex(0).setBrokerIds(Arrays.asList(0, 1, 2)),
                                    new CreatableReplicaAssignment().setPartitionIndex(1).setBrokerIds(Arrays.asList(1, 2, 0))
                                ).iterator()))
                        ).iterator()))).get();
            }
            // Once the threshold is crossed, a snapshot should be generated
            logEnv.waitForLatestSnapshot();
        }
    }
}
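All three snapshot tests above inline the same two-partition, manually assigned topic request. A hypothetical factory that would collapse that duplication (the name newTwoPartitionTopicRequest is ours, not Kafka's):

// Hypothetical helper, not part of Kafka: builds the two-partition,
// manually assigned CreateTopics request repeated throughout these tests.
private static CreateTopicsRequestData newTwoPartitionTopicRequest(String topicName) {
    return new CreateTopicsRequestData().setTopics(
        new CreatableTopicCollection(Collections.singleton(
            new CreatableTopic()
                .setName(topicName)
                .setNumPartitions(-1)
                .setReplicationFactor((short) -1)
                .setAssignments(new CreatableReplicaAssignmentCollection(Arrays.asList(
                    new CreatableReplicaAssignment().setPartitionIndex(0).setBrokerIds(Arrays.asList(0, 1, 2)),
                    new CreatableReplicaAssignment().setPartitionIndex(1).setBrokerIds(Arrays.asList(1, 2, 0))
                ).iterator()))
        ).iterator()));
}

With such a helper, the creation loop above would reduce to active.createTopics(newTwoPartitionTopicRequest(topicName)).get().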