Use of org.apache.kafka.connect.mirror.SourceAndTarget in project kafka by apache:
the startClusters method of the MirrorConnectorsIntegrationBaseTest class.
/**
 * Brings up the primary and backup embedded Kafka/Connect clusters used by the
 * MirrorMaker 2 integration tests.
 *
 * <p>Installs test-friendly exit/halt procedures (so an abrupt JVM exit fails the
 * test instead of killing it), builds the MM2 worker configs for both replication
 * directions, starts both clusters, waits for the MM2 internal topics to appear,
 * creates the test topics, warms up a dummy consumer group, and finally records
 * the now-known broker bootstrap servers back into {@code mm2Props} for the
 * connectors that are configured later.
 *
 * @param additionalMM2Config extra MM2 properties layered on top of the basic
 *                            config (test-specific overrides)
 * @throws Exception if either cluster fails to start or topics do not appear in time
 */
public void startClusters(Map<String, String> additionalMM2Config) throws Exception {
    shuttingDown = false;

    // A non-zero exit outside of an intentional shutdown should fail the test.
    // While shutting down, Connect/Kafka timing is unreliable, so exits are ignored.
    exitProcedure = (code, msg) -> {
        if (!shuttingDown && code != 0) {
            String exitMessage = "Abrupt service exit with code " + code + " and message " + msg;
            log.warn(exitMessage);
            throw new UngracefulShutdownException(exitMessage);
        }
    };

    // Same policy for halts as for exits.
    haltProcedure = (code, msg) -> {
        if (!shuttingDown && code != 0) {
            String haltMessage = "Abrupt service halt with code " + code + " and message " + msg;
            log.warn(haltMessage);
            throw new UngracefulShutdownException(haltMessage);
        }
    };

    // Override the exit and halt procedure that Connect and Kafka will use. For these
    // integration tests, we don't want to exit the JVM and instead simply want to fail the test.
    Exit.setExitProcedure(exitProcedure);
    Exit.setHaltProcedure(haltProcedure);

    // Topics are created explicitly by the test; auto-creation would mask missing-topic bugs.
    primaryBrokerProps.put("auto.create.topics.enable", "false");
    backupBrokerProps.put("auto.create.topics.enable", "false");

    mm2Props.putAll(basicMM2Config());
    mm2Props.putAll(additionalMM2Config);
    // Exclude delete.retention.* topic configs from replication.
    mm2Props.put(DefaultConfigPropertyFilter.CONFIG_PROPERTIES_EXCLUDE_CONFIG, "delete\\.retention\\..*");

    mm2Config = new MirrorMakerConfig(mm2Props);
    // Each cluster's workers are configured for the flow that targets it:
    // primary hosts the backup->primary flow, backup hosts primary->backup.
    primaryWorkerProps = mm2Config.workerConfig(new SourceAndTarget(BACKUP_CLUSTER_ALIAS, PRIMARY_CLUSTER_ALIAS));
    backupWorkerProps.putAll(mm2Config.workerConfig(new SourceAndTarget(PRIMARY_CLUSTER_ALIAS, BACKUP_CLUSTER_ALIAS)));

    primary = new EmbeddedConnectCluster.Builder()
            .name(PRIMARY_CLUSTER_ALIAS + "-connect-cluster")
            .numWorkers(NUM_WORKERS)
            .numBrokers(1)
            .brokerProps(primaryBrokerProps)
            .workerProps(primaryWorkerProps)
            .maskExitProcedures(false)
            .build();
    backup = new EmbeddedConnectCluster.Builder()
            .name(BACKUP_CLUSTER_ALIAS + "-connect-cluster")
            .numWorkers(NUM_WORKERS)
            .numBrokers(1)
            .brokerProps(backupBrokerProps)
            .workerProps(backupWorkerProps)
            .maskExitProcedures(false)
            .build();

    primary.start();
    primary.assertions().assertAtLeastNumWorkersAreUp(NUM_WORKERS,
            "Workers of " + PRIMARY_CLUSTER_ALIAS + "-connect-cluster did not start in time.");
    // Wait for the MM2 internal topics on the primary cluster before proceeding.
    waitForTopicCreated(primary, "mm2-status.backup.internal");
    waitForTopicCreated(primary, "mm2-offsets.backup.internal");
    waitForTopicCreated(primary, "mm2-configs.backup.internal");

    backup.start();
    backup.assertions().assertAtLeastNumWorkersAreUp(NUM_WORKERS,
            "Workers of " + BACKUP_CLUSTER_ALIAS + "-connect-cluster did not start in time.");
    // And likewise on the backup cluster.
    waitForTopicCreated(backup, "mm2-status.primary.internal");
    waitForTopicCreated(backup, "mm2-offsets.primary.internal");
    waitForTopicCreated(backup, "mm2-configs.primary.internal");

    createTopics();
    // Warm up a throwaway consumer group so group-related machinery is exercised up front.
    warmUpConsumer(Collections.singletonMap("group.id", "consumer-group-dummy"));

    log.info(PRIMARY_CLUSTER_ALIAS + " REST service: {}", primary.endpointForResource("connectors"));
    log.info(BACKUP_CLUSTER_ALIAS + " REST service: {}", backup.endpointForResource("connectors"));
    log.info(PRIMARY_CLUSTER_ALIAS + " brokers: {}", primary.kafka().bootstrapServers());
    log.info(BACKUP_CLUSTER_ALIAS + " brokers: {}", backup.kafka().bootstrapServers());

    // Now that the brokers are running, we can finish setting up the Connectors.
    mm2Props.put(PRIMARY_CLUSTER_ALIAS + ".bootstrap.servers", primary.kafka().bootstrapServers());
    mm2Props.put(BACKUP_CLUSTER_ALIAS + ".bootstrap.servers", backup.kafka().bootstrapServers());
}
Aggregations