Use of org.testcontainers.containers.GenericContainer in project flink by apache.
The class KafkaTestEnvironmentImpl, method startKafkaContainerCluster.
private void startKafkaContainerCluster(int numBrokers) {
    Network network = Network.newNetwork();
    // A dedicated Zookeeper container is only needed when more than one broker is requested;
    // a single KafkaContainer manages its own embedded Zookeeper.
    if (numBrokers > 1) {
        zookeeper = createZookeeperContainer(network);
        zookeeper.start();
        LOG.info("Zookeeper container started");
    }
    for (int brokerID = 0; brokerID < numBrokers; brokerID++) {
        KafkaContainer broker = createKafkaContainer(network, brokerID, zookeeper);
        brokers.put(brokerID, broker);
    }
    // Start all brokers in parallel to shorten test setup.
    new ArrayList<>(brokers.values()).parallelStream().forEach(GenericContainer::start);
    LOG.info("{} brokers started", numBrokers);
    // Join the bootstrap servers into a comma-separated list, stripping the "PLAINTEXT://" prefix.
    brokerConnectionString =
            brokers.values().stream()
                    .map(KafkaContainer::getBootstrapServers)
                    .map(server -> server.split("://")[1])
                    .collect(Collectors.joining(","));
}
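The factory methods createZookeeperContainer and createKafkaContainer are referenced above but not shown in this excerpt. The following is a minimal sketch of what the broker factory might look like, assuming the standard Testcontainers KafkaContainer API; the image tag, network alias scheme, broker settings, and the "zookeeper" alias are illustrative assumptions, not Flink's actual configuration.

private KafkaContainer createKafkaContainer(Network network, int brokerID, GenericContainer<?> zookeeper) {
    // Hypothetical sketch; image tag and alias scheme are assumptions.
    KafkaContainer container =
            new KafkaContainer(DockerImageName.parse("confluentinc/cp-kafka:7.4.0"))
                    .withNetwork(network)
                    .withNetworkAliases("kafka-" + brokerID)
                    .withEnv("KAFKA_BROKER_ID", String.valueOf(brokerID));
    if (zookeeper != null) {
        // Multi-broker cluster: every broker joins the shared external Zookeeper
        // ("zookeeper" is the assumed network alias of the Zookeeper container).
        container.withExternalZookeeper("zookeeper:2181");
    } else {
        // Single broker: let Testcontainers manage an embedded Zookeeper.
        container.withEmbeddedZookeeper();
    }
    return container;
}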
Use of org.testcontainers.containers.GenericContainer in project flink by apache.
The class FlinkContainersBuilder, method build.
/**
 * Builds {@link FlinkContainers}.
 */
public FlinkContainers build() {
    // Setup Zookeeper HA
    GenericContainer<?> zookeeper = null;
    if (enableZookeeperHA) {
        enableZookeeperHAConfigurations();
        zookeeper = buildZookeeperContainer();
    }
    // Add common configurations
    this.conf.set(JobManagerOptions.ADDRESS, JOB_MANAGER_HOSTNAME);
    this.conf.set(
            CheckpointingOptions.CHECKPOINTS_DIRECTORY,
            CHECKPOINT_PATH.toAbsolutePath().toUri().toString());
    this.conf.set(RestOptions.BIND_ADDRESS, "0.0.0.0");
    this.conf.set(JobManagerOptions.BIND_HOST, "0.0.0.0");
    this.conf.set(TaskManagerOptions.BIND_HOST, "0.0.0.0");
    // Create temporary directory for building Flink image
    final Path imageBuildingTempDir;
    try {
        imageBuildingTempDir = Files.createTempDirectory("flink-image-build");
    } catch (IOException e) {
        throw new RuntimeException("Failed to create temporary directory", e);
    }
    // Build JobManager
    final GenericContainer<?> jobManager = buildJobManagerContainer(imageBuildingTempDir);
    // Build TaskManagers
    final List<GenericContainer<?>> taskManagers =
            buildTaskManagerContainers(imageBuildingTempDir);
    // Mount HA storage to JobManager
    if (enableZookeeperHA) {
        createTempDirAndMountToContainer("flink-recovery", HA_STORAGE_PATH, jobManager);
    }
    // Mount checkpoint storage to JobManager
    createTempDirAndMountToContainer("flink-checkpoint", CHECKPOINT_PATH, jobManager);
    return new FlinkContainers(jobManager, taskManagers, zookeeper, conf);
}
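The helper createTempDirAndMountToContainer is referenced above but not shown. A plausible sketch, under the assumption that it binds a freshly created host temp directory into the container at the given path via Testcontainers' file-system bind API (the actual Flink implementation may differ):

// Hypothetical sketch of createTempDirAndMountToContainer; not Flink's actual implementation.
private void createTempDirAndMountToContainer(
        String tempDirPrefix, Path containerPath, GenericContainer<?> container) {
    try {
        Path tempDir = Files.createTempDirectory(tempDirPrefix);
        // Bind the host directory read-write at the path the Flink configuration points to.
        container.withFileSystemBind(
                tempDir.toAbsolutePath().toString(), containerPath.toString(), BindMode.READ_WRITE);
    } catch (IOException e) {
        throw new RuntimeException("Failed to create temporary directory for " + tempDirPrefix, e);
    }
}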
Use of org.testcontainers.containers.GenericContainer in project spring-boot by spring-projects.
The class PaketoBuilderTests, method plainDistZipJarApp.
@Test
void plainDistZipJarApp() throws Exception {
    writeMainClass();
    String projectName = this.gradleBuild.getProjectDir().getName();
    String imageName = "paketo-integration/" + projectName;
    ImageReference imageReference = ImageReference.of(ImageName.of(imageName));
    BuildResult result = buildImage(imageName, "assemble", "bootDistZip");
    assertThat(result.task(":bootBuildImage").getOutcome()).isEqualTo(TaskOutcome.SUCCESS);
    try (GenericContainer<?> container = new GenericContainer<>(imageName).withExposedPorts(8080)) {
        container.waitingFor(Wait.forHttp("/test")).start();
        ContainerConfig config = container.getContainerInfo().getConfig();
        ImageAssertions.assertThat(config).buildMetadata((metadata) -> {
            metadata.buildpacks()
                    .contains("paketo-buildpacks/ca-certificates", "paketo-buildpacks/bellsoft-liberica",
                            "paketo-buildpacks/dist-zip", "paketo-buildpacks/spring-boot");
            metadata.processOfType("web")
                    .extracting("command", "args")
                    .containsExactly("/workspace/" + projectName + "/bin/" + projectName, Collections.emptyList());
            metadata.processOfType("dist-zip")
                    .extracting("command", "args")
                    .containsExactly("/workspace/" + projectName + "/bin/" + projectName, Collections.emptyList());
        });
        assertImageHasSbomLayer(imageReference, config, "dist-zip");
        DigestCapturingCondition digest = new DigestCapturingCondition();
        ImageAssertions.assertThat(config)
                .lifecycleMetadata((metadata) -> metadata.appLayerShas().haveExactly(1, digest));
        ImageAssertions.assertThat(imageReference).layer(digest.getDigest(), (layer) -> layer.entries()
                .contains(projectName + "/bin/" + projectName, projectName + "/lib/" + projectName + "-plain.jar")
                .anyMatch((s) -> s.startsWith(projectName + "/lib/spring-boot-"))
                .anyMatch((s) -> s.startsWith(projectName + "/lib/spring-core-"))
                .anyMatch((s) -> s.startsWith(projectName + "/lib/spring-web-")));
    } finally {
        removeImage(imageReference);
    }
}
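DigestCapturingCondition is a test helper that is not part of this excerpt. A hypothetical sketch of what a digest-capturing AssertJ Condition could look like (an illustration, not Spring Boot's actual helper; requires org.assertj.core.api.Condition):

// Hypothetical AssertJ condition that accepts every value and records it for later lookup.
static final class DigestCapturingCondition extends Condition<Object> {

    private String digest;

    DigestCapturingCondition() {
        super("a layer digest to capture");
    }

    @Override
    public boolean matches(Object value) {
        // Remember the digest so the test can fetch the matching layer afterwards.
        this.digest = String.valueOf(value);
        return true;
    }

    String getDigest() {
        return this.digest;
    }
}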
Use of org.testcontainers.containers.GenericContainer in project spring-boot by spring-projects.
The class PaketoBuilderTests, method executableJarAppWithAdditionalArgs.
@Test
void executableJarAppWithAdditionalArgs() throws Exception {
    writeMainClass();
    String imageName = "paketo-integration/" + this.gradleBuild.getProjectDir().getName();
    ImageReference imageReference = ImageReference.of(ImageName.of(imageName));
    BuildResult result = buildImage(imageName);
    assertThat(result.task(":bootBuildImage").getOutcome()).isEqualTo(TaskOutcome.SUCCESS);
    try (GenericContainer<?> container = new GenericContainer<>(imageName)
            .withCommand("--server.port=9090")
            .withExposedPorts(9090)) {
        container.waitingFor(Wait.forHttp("/test")).start();
    } finally {
        removeImage(imageReference);
    }
}
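This test only verifies that the container becomes reachable on the overridden port. If the test also needed to call the application, the standard Testcontainers accessors would look roughly like the sketch below; the /test endpoint and the plain java.net client are assumptions for illustration.

// Hedged sketch: resolve the host-mapped port and call the application.
String host = container.getHost();
Integer mappedPort = container.getMappedPort(9090);
URL url = new URL("http://" + host + ":" + mappedPort + "/test");
try (InputStream in = url.openStream()) {
    // Read and assert on the response body as needed.
}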
Use of org.testcontainers.containers.GenericContainer in project zipkin by openzipkin.
The class ServerIntegratedBenchmark, method runBenchmark.
void runBenchmark(@Nullable GenericContainer<?> storage, GenericContainer<?> zipkin) throws Exception {
    GenericContainer<?> backend =
            new GenericContainer<>(parse("ghcr.io/openzipkin/brave-example:armeria"))
                    .withNetwork(Network.SHARED)
                    .withNetworkAliases("backend")
                    .withCommand("backend")
                    .withExposedPorts(9000)
                    .waitingFor(Wait.forHealthcheck());
    GenericContainer<?> frontend =
            new GenericContainer<>(parse("ghcr.io/openzipkin/brave-example:armeria"))
                    .withNetwork(Network.SHARED)
                    .withNetworkAliases("frontend")
                    .withCommand("frontend")
                    .withExposedPorts(8081)
                    .waitingFor(Wait.forHealthcheck());
    containers.add(frontend);
    // Use a quay.io mirror to prevent build outages due to Docker Hub pull quotas
    // Use same version as in docker/examples/docker-compose-prometheus.yml
    GenericContainer<?> prometheus =
            new GenericContainer<>(parse("quay.io/prometheus/prometheus:v2.23.0"))
                    .withNetwork(Network.SHARED)
                    .withNetworkAliases("prometheus")
                    .withExposedPorts(9090)
                    .withCopyFileToContainer(MountableFile.forClasspathResource("prometheus.yml"), "/etc/prometheus/prometheus.yml");
    containers.add(prometheus);
    // Use a quay.io mirror to prevent build outages due to Docker Hub pull quotas
    // Use same version as in docker/examples/docker-compose-prometheus.yml
    GenericContainer<?> grafana =
            new GenericContainer<>(parse("quay.io/app-sre/grafana:7.3.4"))
                    .withNetwork(Network.SHARED)
                    .withNetworkAliases("grafana")
                    .withExposedPorts(3000)
                    .withEnv("GF_AUTH_ANONYMOUS_ENABLED", "true")
                    .withEnv("GF_AUTH_ANONYMOUS_ORG_ROLE", "Admin");
    containers.add(grafana);
    // This is an arbitrary small image that has curl installed
    // Use a quay.io mirror to prevent build outages due to Docker Hub pull quotas
    // Use same version as in docker/examples/docker-compose-prometheus.yml
    GenericContainer<?> grafanaDashboards =
            new GenericContainer<>(parse("quay.io/rackspace/curl:7.70.0"))
                    .withNetwork(Network.SHARED)
                    .withWorkingDirectory("/tmp")
                    .withLogConsumer(new Slf4jLogConsumer(LOG))
                    .withCreateContainerCmdModifier(it -> it.withEntrypoint("/tmp/create.sh"))
                    .withCopyFileToContainer(MountableFile.forClasspathResource("create-datasource-and-dashboard.sh", 555), "/tmp/create.sh");
    containers.add(grafanaDashboards);
    // Use a quay.io mirror to prevent build outages due to Docker Hub pull quotas
    GenericContainer<?> wrk =
            new GenericContainer<>(parse("quay.io/dim/wrk:stable"))
                    .withNetwork(Network.SHARED)
                    .withCreateContainerCmdModifier(it -> it.withEntrypoint("wrk"))
                    .withCommand("-t4 -c128 -d100s http://frontend:8081 --latency");
    containers.add(wrk);
    grafanaDashboards.dependsOn(grafana);
    wrk.dependsOn(frontend, backend, prometheus, grafanaDashboards, zipkin);
    if (storage != null)
        wrk.dependsOn(storage);
    Startables.deepStart(Stream.of(wrk)).join();
    System.out.println("Benchmark started.");
    if (zipkin != null)
        printContainerMapping(zipkin);
    if (storage != null)
        printContainerMapping(storage);
    printContainerMapping(backend);
    printContainerMapping(frontend);
    printContainerMapping(prometheus);
    printContainerMapping(grafana);
    while (wrk.isRunning()) {
        Thread.sleep(1000);
    }
    // Wait for prometheus to do a final scrape.
    Thread.sleep(5000);
    System.out.println("Benchmark complete, wrk output:");
    System.out.println(wrk.getLogs().replace("\n\n", "\n"));
    WebClient prometheusClient =
            WebClient.of("h1c://" + prometheus.getContainerIpAddress() + ":" + prometheus.getFirstMappedPort());
    System.out.println(String.format("Messages received: %s", prometheusValue(prometheusClient, "sum(zipkin_collector_messages_total)")));
    System.out.println(String.format("Spans received: %s", prometheusValue(prometheusClient, "sum(zipkin_collector_spans_total)")));
    System.out.println(String.format("Spans dropped: %s", prometheusValue(prometheusClient, "sum(zipkin_collector_spans_dropped_total)")));
    System.out.println("Memory quantiles:");
    printQuartiles(prometheusClient, "jvm_memory_used_bytes{area=\"heap\"}");
    printQuartiles(prometheusClient, "jvm_memory_used_bytes{area=\"nonheap\"}");
    System.out.println(String.format("Total GC time (s): %s", prometheusValue(prometheusClient, "sum(jvm_gc_pause_seconds_sum)")));
    System.out.println(String.format("Number of GCs: %s", prometheusValue(prometheusClient, "sum(jvm_gc_pause_seconds_count)")));
    System.out.println("POST Spans latency (s)");
    printHistogram(prometheusClient,
            "http_server_requests_seconds_bucket{" + "method=\"POST\",status=\"202\",uri=\"/api/v2/spans\"}");
    if (WAIT_AFTER_BENCHMARK) {
        System.out.println("Keeping containers running until explicit termination. "
                + "Feel free to poke around in grafana.");
        Thread.sleep(Long.MAX_VALUE);
    }
}
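The prometheusValue, printQuartiles, and printHistogram helpers are not shown. A rough sketch of what a scalar query helper against the Prometheus HTTP API could look like, assuming an Armeria WebClient and Jackson for JSON parsing; the response-path handling mirrors the standard /api/v1/query instant-query shape and is an illustration, not Zipkin's actual implementation.

// Hypothetical sketch of prometheusValue; not Zipkin's actual implementation.
static String prometheusValue(WebClient prometheusClient, String query) throws Exception {
    String path = "/api/v1/query?query=" + URLEncoder.encode(query, StandardCharsets.UTF_8);
    String body = prometheusClient.get(path).aggregate().join().contentUtf8();
    // An instant query nests the scalar under data.result[0].value[1].
    JsonNode value = new ObjectMapper().readTree(body).at("/data/result/0/value/1");
    return value.isMissingNode() ? "N/A" : value.asText();
}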