Usage of org.testcontainers.containers.KafkaContainer in the Apache Flink project — class KafkaTestEnvironmentImpl, method startKafkaContainerCluster:
private void startKafkaContainerCluster(int numBrokers) {
    final Network network = Network.newNetwork();
    // A multi-broker cluster needs a shared external Zookeeper; a single broker
    // relies on its embedded one (see createKafkaContainer).
    if (numBrokers > 1) {
        zookeeper = createZookeeperContainer(network);
        zookeeper.start();
        LOG.info("Zookeeper container started");
    }
    for (int id = 0; id < numBrokers; id++) {
        brokers.put(id, createKafkaContainer(network, id, zookeeper));
    }
    // Start all brokers concurrently to cut down test setup time.
    new ArrayList<>(brokers.values()).parallelStream().forEach(GenericContainer::start);
    LOG.info("{} brokers started", numBrokers);
    // getBootstrapServers() yields "PLAINTEXT://host:port"; keep only host:port
    // and join all brokers into a single comma-separated connection string.
    brokerConnectionString =
            brokers.values().stream()
                    .map(KafkaContainer::getBootstrapServers)
                    .map(address -> address.split("://")[1])
                    .collect(Collectors.joining(","));
}
Usage of org.testcontainers.containers.KafkaContainer in the Apache Beam project — class KafkaIOIT, method setupKafkaContainer:
private static void setupKafkaContainer() {
    // Pin the Confluent Kafka image to the version supplied via the pipeline options.
    final DockerImageName image =
            DockerImageName.parse("confluentinc/cp-kafka")
                    .withTag(options.getKafkaContainerVersion());
    kafkaContainer = new KafkaContainer(image);
    kafkaContainer.start();
    // Expose the container's bootstrap servers to the rest of the integration test.
    options.setKafkaBootstrapServerAddresses(kafkaContainer.getBootstrapServers());
}
Usage of org.testcontainers.containers.KafkaContainer in the Apache Flink project — class KafkaTestEnvironmentImpl, method createKafkaContainer:
private KafkaContainer createKafkaContainer(Network network, int brokerID, @Nullable GenericContainer<?> zookeeper) {
    final String brokerName = String.format("Kafka-%d", brokerID);
    // 50 MB cap, applied to both producer messages and replica fetches.
    final String fiftyMegabytes = String.valueOf(50 * 1024 * 1024);
    KafkaContainer broker =
            KafkaUtil.createKafkaContainer(DockerImageVersions.KAFKA, LOG, brokerName)
                    .withNetwork(network)
                    .withNetworkAliases(brokerName)
                    .withEnv("KAFKA_BROKER_ID", String.valueOf(brokerID))
                    .withEnv("KAFKA_MESSAGE_MAX_BYTES", fiftyMegabytes)
                    .withEnv("KAFKA_REPLICA_FETCH_MAX_BYTES", fiftyMegabytes)
                    // Two hours, so long-running transactional tests do not time out.
                    .withEnv("KAFKA_TRANSACTION_MAX_TIMEOUT_MS", Integer.toString(1000 * 60 * 60 * 2))
                    // -1 disables time-based log retention; tests drive cleanup themselves.
                    .withEnv("KAFKA_LOG_RETENTION_MS", "-1")
                    .withEnv("KAFKA_ZOOKEEPER_SESSION_TIMEOUT_MS", String.valueOf(zkTimeout))
                    .withEnv("KAFKA_ZOOKEEPER_CONNECTION_TIMEOUT_MS", String.valueOf(zkTimeout));
    if (zookeeper == null) {
        // Single-broker setup: no shared Zookeeper container was created.
        broker.withEmbeddedZookeeper();
    } else {
        broker.dependsOn(zookeeper)
                .withExternalZookeeper(
                        String.format("%s:%d", ZOOKEEPER_HOSTNAME, ZOOKEEPER_PORT));
    }
    return broker;
}
Usage of org.testcontainers.containers.KafkaContainer in the Apache Flink project — class KafkaUtil, method createKafkaContainer:
/**
 * Creates a {@link KafkaContainer} with commonly used test configuration, aligning the
 * container's internal Kafka log levels with the effective level of the given logger and
 * forwarding the container's output to that logger under the given prefix.
 *
 * @param dockerImageVersion fully qualified docker image name (including tag) to run
 * @param logger logger that receives container output and determines the Kafka log level
 * @param loggerPrefix prefix prepended to forwarded log lines; ignored if null or blank
 * @return a configured, not-yet-started Kafka container
 */
public static KafkaContainer createKafkaContainer(String dockerImageVersion, Logger logger, String loggerPrefix) {
    final String logLevel = resolveLogLevel(logger);
    Slf4jLogConsumer logConsumer = new Slf4jLogConsumer(logger);
    if (!StringUtils.isNullOrWhitespaceOnly(loggerPrefix)) {
        logConsumer.withPrefix(loggerPrefix);
    }
    // Fix: the original chain set KAFKA_TRANSACTION_STATE_LOG_REPLICATION_FACTOR,
    // KAFKA_TRANSACTION_STATE_LOG_MIN_ISR and KAFKA_CONFLUENT_SUPPORT_METRICS_ENABLE
    // twice each; the duplicates were removed (withEnv overwrites, so values are unchanged).
    return new KafkaContainer(DockerImageName.parse(dockerImageVersion))
            // Single-broker defaults: the transaction state log cannot be replicated.
            .withEnv("KAFKA_TRANSACTION_STATE_LOG_REPLICATION_FACTOR", "1")
            .withEnv("KAFKA_TRANSACTION_STATE_LOG_MIN_ISR", "1")
            .withEnv("KAFKA_CONFLUENT_SUPPORT_METRICS_ENABLE", "false")
            // Two hours, so long-running transactional tests do not time out.
            .withEnv("KAFKA_TRANSACTION_MAX_TIMEOUT_MS", String.valueOf(Duration.ofHours(2).toMillis()))
            .withEnv("KAFKA_LOG4J_ROOT_LOGLEVEL", logLevel)
            .withEnv("KAFKA_LOG4J_LOGGERS", "state.change.logger=" + logLevel)
            .withEnv("KAFKA_LOG4J_TOOLS_ROOT_LOGLEVEL", logLevel)
            .withLogConsumer(logConsumer);
}

/** Maps the logger's effective SLF4J level to the matching log4j level name. */
private static String resolveLogLevel(Logger logger) {
    if (logger.isTraceEnabled()) {
        return "TRACE";
    } else if (logger.isDebugEnabled()) {
        return "DEBUG";
    } else if (logger.isInfoEnabled()) {
        return "INFO";
    } else if (logger.isWarnEnabled()) {
        return "WARN";
    } else if (logger.isErrorEnabled()) {
        return "ERROR";
    } else {
        // No level enabled at all: silence the container's internal logging.
        return "OFF";
    }
}
Aggregations