Use of org.apache.flink.tests.util.flink.ClusterController in project flink by apache.
From the class MetricsAvailabilityITCase, method testReporter:
@Test
public void testReporter() throws Exception {
    try (ClusterController ignored = dist.startCluster(1)) {
        final RestClient restClient = new RestClient(new Configuration(), scheduledExecutorService);

        // Check that the JobManager reports metrics ...
        checkJobManagerMetricAvailability(restClient);

        // ... and that every registered TaskManager does as well.
        final Collection<ResourceID> taskManagerIds = getTaskManagerIds(restClient);
        for (final ResourceID taskManagerId : taskManagerIds) {
            checkTaskManagerMetricAvailability(restClient, taskManagerId);
        }
    }
}
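The helpers checkJobManagerMetricAvailability and getTaskManagerIds are defined elsewhere in MetricsAvailabilityITCase and go through Flink's RestClient. As a rough, hypothetical sketch of the polling they perform, the snippet below queries the documented /jobmanager/metrics REST endpoint directly with java.net.HttpURLConnection; the localhost:8081 address and the retry budget are assumptions for illustration, not values from the original test.

import java.io.IOException;
import java.io.InputStream;
import java.net.HttpURLConnection;
import java.net.URL;
import java.nio.charset.StandardCharsets;

/** Hypothetical stand-in for the helpers above: polls Flink's REST API until metrics appear. */
final class MetricPollingSketch {

    // Assumption: the test cluster serves its REST API on the default localhost:8081.
    private static final String REST_BASE = "http://localhost:8081";

    static void checkJobManagerMetricAvailability() throws Exception {
        // GET /jobmanager/metrics returns a JSON array of {"id": "<metric-name>"} entries.
        String body = pollUntilNonEmpty(REST_BASE + "/jobmanager/metrics");
        if (!body.contains("\"id\"")) {
            throw new AssertionError("No JobManager metrics reported: " + body);
        }
    }

    private static String pollUntilNonEmpty(String url) throws Exception {
        IOException lastFailure = null;
        for (int attempt = 0; attempt < 30; attempt++) { // arbitrary retry budget
            try {
                HttpURLConnection connection = (HttpURLConnection) new URL(url).openConnection();
                try (InputStream in = connection.getInputStream()) {
                    String body = new String(in.readAllBytes(), StandardCharsets.UTF_8);
                    if (!body.isEmpty() && !"[]".equals(body)) {
                        return body;
                    }
                }
            } catch (IOException e) {
                lastFailure = e; // the REST endpoint may not be up yet; retry
            }
            Thread.sleep(1_000);
        }
        throw new AssertionError("Metrics did not become available.", lastFailure);
    }
}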
Use of org.apache.flink.tests.util.flink.ClusterController in project flink by apache.
From the class SQLClientHBaseITCase, method testHBase:
@Test
public void testHBase() throws Exception {
    try (ClusterController clusterController = flink.startCluster(2)) {
        // Create the source and sink tables and put data into the source
        hbase.createTable("source", "family1", "family2");
        hbase.createTable("sink", "family1", "family2");
        hbase.putData("source", "row1", "family1", "f1c1", "v1");
        hbase.putData("source", "row1", "family2", "f2c1", "v2");
        hbase.putData("source", "row1", "family2", "f2c2", "v3");
        hbase.putData("source", "row2", "family1", "f1c1", "v4");
        hbase.putData("source", "row2", "family2", "f2c1", "v5");
        hbase.putData("source", "row2", "family2", "f2c2", "v6");

        // Initialize the SQL statements from the "hbase_e2e.sql" file
        Map<String, String> varsMap = new HashMap<>();
        varsMap.put("$HBASE_CONNECTOR", hbaseConnector);
        List<String> sqlLines = initializeSqlLines(varsMap);

        // Execute the SQL statements from the "hbase_e2e.sql" file
        executeSqlStatements(clusterController, sqlLines);

        LOG.info("Verify the sink table result.");
        // Wait until all the results have been flushed to the HBase sink table.
        checkHBaseSinkResult();
        LOG.info("The HBase SQL client test ran successfully.");
    }
}
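initializeSqlLines and executeSqlStatements are defined elsewhere in SQLClientHBaseITCase. Conceptually, initializeSqlLines reads the "hbase_e2e.sql" script and substitutes the $-prefixed placeholders with the values from the map. Below is a minimal sketch of that substitution, assuming the script is accessible as a plain file; the method body is illustrative, not the original implementation.

import java.nio.file.Files;
import java.nio.file.Path;
import java.util.ArrayList;
import java.util.List;
import java.util.Map;

final class SqlScriptSketch {

    /** Reads the SQL script and replaces each placeholder key with its value, line by line. */
    static List<String> initializeSqlLines(Path sqlPath, Map<String, String> varsMap) throws Exception {
        List<String> sqlLines = new ArrayList<>();
        for (String line : Files.readAllLines(sqlPath)) {
            for (Map.Entry<String, String> var : varsMap.entrySet()) {
                line = line.replace(var.getKey(), var.getValue());
            }
            sqlLines.add(line);
        }
        return sqlLines;
    }
}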
Use of org.apache.flink.tests.util.flink.ClusterController in project flink by apache.
From the class PrometheusReporterEndToEndITCase, method testReporter:
@Test
public void testReporter() throws Exception {
    final Path tmpPrometheusDir = tmp.newFolder().toPath().resolve("prometheus");
    final Path prometheusBinDir = tmpPrometheusDir.resolve(PROMETHEUS_FILE_NAME);
    final Path prometheusConfig = prometheusBinDir.resolve("prometheus.yml");
    final Path prometheusBinary = prometheusBinDir.resolve("prometheus");
    Files.createDirectory(tmpPrometheusDir);

    final Path prometheusArchive = downloadCache.getOrDownload(
            "https://github.com/prometheus/prometheus/releases/download/v"
                    + PROMETHEUS_VERSION + '/' + PROMETHEUS_FILE_NAME + ".tar.gz",
            tmpPrometheusDir);

    LOG.info("Unpacking Prometheus.");
    runBlocking(CommandLineWrapper.tar(prometheusArchive)
            .extract()
            .zipped()
            .targetDir(tmpPrometheusDir)
            .build());

    LOG.info("Setting Prometheus scrape interval.");
    runBlocking(CommandLineWrapper.sed("s/\\(scrape_interval:\\).*/\\1 1s/", prometheusConfig)
            .inPlace()
            .build());

    try (ClusterController ignored = dist.startCluster(1)) {
        final List<Integer> ports = dist.searchAllLogs(LOG_REPORTER_PORT_PATTERN, matcher -> matcher.group(1))
                .map(Integer::valueOf)
                .collect(Collectors.toList());

        final String scrapeTargets = ports.stream()
                .map(port -> "'localhost:" + port + "'")
                .collect(Collectors.joining(", "));

        LOG.info("Setting Prometheus scrape targets to {}.", scrapeTargets);
        runBlocking(CommandLineWrapper.sed("s/\\(targets:\\).*/\\1 [" + scrapeTargets + "]/", prometheusConfig)
                .inPlace()
                .build());

        LOG.info("Starting Prometheus server.");
        try (AutoClosableProcess prometheus = runNonBlocking(
                prometheusBinary.toAbsolutePath().toString(),
                "--config.file=" + prometheusConfig.toAbsolutePath(),
                "--storage.tsdb.path=" + prometheusBinDir.resolve("data").toAbsolutePath())) {
            final OkHttpClient client = new OkHttpClient();

            checkMetricAvailability(client, "flink_jobmanager_numRegisteredTaskManagers");
            checkMetricAvailability(client, "flink_taskmanager_Status_Network_TotalMemorySegments");
        }
    }
}
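checkMetricAvailability is not shown above. A plausible sketch of it follows, assuming Prometheus listens on its default port 9090 and using the stable /api/v1/query HTTP endpoint; the retry loop and the body check are illustrative choices, not taken from the original test.

import okhttp3.OkHttpClient;
import okhttp3.Request;
import okhttp3.Response;

final class PrometheusCheckSketch {

    /** Polls Prometheus until an instant query for the given metric returns at least one sample. */
    static void checkMetricAvailability(OkHttpClient client, String metricName) throws Exception {
        // Assumption: Prometheus serves its HTTP API on the default localhost:9090.
        Request request = new Request.Builder()
                .url("http://localhost:9090/api/v1/query?query=" + metricName)
                .build();

        for (int attempt = 0; attempt < 30; attempt++) { // arbitrary retry budget
            try (Response response = client.newCall(request).execute()) {
                String body = response.body().string();
                // A query that returned samples echoes the metric name in the result set.
                if (response.isSuccessful() && body.contains("\"__name__\":\"" + metricName + "\"")) {
                    return;
                }
            }
            Thread.sleep(1_000);
        }
        throw new AssertionError("Metric " + metricName + " never became available.");
    }
}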
Use of org.apache.flink.tests.util.flink.ClusterController in project flink by apache.
From the class SQLClientKafkaITCase, method testKafka:
@Test
public void testKafka() throws Exception {
    try (ClusterController clusterController = flink.startCluster(2)) {
        // Create the JSON topic and send the test messages
        String testJsonTopic = "test-json-" + kafkaVersion + "-" + UUID.randomUUID().toString();
        String testAvroTopic = "test-avro-" + kafkaVersion + "-" + UUID.randomUUID().toString();
        kafka.createTopic(1, 1, testJsonTopic);
        String[] messages = new String[] {
                "{\"rowtime\": \"2018-03-12 08:00:00\", \"user\": \"Alice\", \"event\": { \"type\": \"WARNING\", \"message\": \"This is a warning.\"}}",
                "{\"rowtime\": \"2018-03-12 08:10:00\", \"user\": \"Alice\", \"event\": { \"type\": \"WARNING\", \"message\": \"This is a warning.\"}}",
                "{\"rowtime\": \"2018-03-12 09:00:00\", \"user\": \"Bob\", \"event\": { \"type\": \"WARNING\", \"message\": \"This is another warning.\"}}",
                "{\"rowtime\": \"2018-03-12 09:10:00\", \"user\": \"Alice\", \"event\": { \"type\": \"INFO\", \"message\": \"This is a info.\"}}",
                "{\"rowtime\": \"2018-03-12 09:20:00\", \"user\": \"Steve\", \"event\": { \"type\": \"INFO\", \"message\": \"This is another info.\"}}",
                "{\"rowtime\": \"2018-03-12 09:30:00\", \"user\": \"Steve\", \"event\": { \"type\": \"INFO\", \"message\": \"This is another info.\"}}",
                "{\"rowtime\": \"2018-03-12 09:30:00\", \"user\": null, \"event\": { \"type\": \"WARNING\", \"message\": \"This is a bad message because the user is missing.\"}}",
                "{\"rowtime\": \"2018-03-12 10:40:00\", \"user\": \"Bob\", \"event\": { \"type\": \"ERROR\", \"message\": \"This is an error.\"}}"
        };
        kafka.sendMessages(testJsonTopic, messages);

        // Create the topic test-avro
        kafka.createTopic(1, 1, testAvroTopic);

        // Initialize the SQL statements from the "kafka_e2e.sql" file
        Map<String, String> varsMap = new HashMap<>();
        varsMap.put("$KAFKA_IDENTIFIER", this.kafkaIdentifier);
        varsMap.put("$TOPIC_JSON_NAME", testJsonTopic);
        varsMap.put("$TOPIC_AVRO_NAME", testAvroTopic);
        varsMap.put("$RESULT", this.result.toAbsolutePath().toString());
        varsMap.put("$KAFKA_BOOTSTRAP_SERVERS", StringUtils.join(kafka.getBootstrapServerAddresses().toArray(), ","));
        List<String> sqlLines = initializeSqlLines(varsMap);

        // Execute the SQL statements from the "kafka_e2e.sql" file
        executeSqlStatements(clusterController, sqlLines);

        // Wait until all the results have been flushed to the CSV file.
        LOG.info("Verify the CSV result.");
        checkCsvResultFile();
        LOG.info("The Kafka({}) SQL client test ran successfully.", this.kafkaSQLVersion);
    }
}
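checkCsvResultFile is likewise defined elsewhere in the test class; the idea is to poll the path substituted for $RESULT until the sink has flushed the expected rows. A minimal sketch under that assumption follows; the expected row count parameter and the retry budget are placeholders, not the values the real test asserts.

import java.nio.file.Files;
import java.nio.file.Path;
import java.util.ArrayList;
import java.util.List;
import java.util.stream.Collectors;
import java.util.stream.Stream;

final class CsvResultSketch {

    /** Polls the result directory until the rows written by the filesystem sink reach the expected count. */
    static void checkCsvResultFile(Path resultDir, int expectedRows) throws Exception {
        for (int attempt = 0; attempt < 60; attempt++) { // arbitrary retry budget
            List<String> rows = new ArrayList<>();
            if (Files.exists(resultDir)) {
                // The sink may split its output across several part files.
                List<Path> partFiles;
                try (Stream<Path> files = Files.walk(resultDir)) {
                    partFiles = files.filter(Files::isRegularFile).collect(Collectors.toList());
                }
                for (Path file : partFiles) {
                    rows.addAll(Files.readAllLines(file));
                }
            }
            if (rows.size() >= expectedRows) {
                return;
            }
            Thread.sleep(1_000);
        }
        throw new AssertionError("CSV result never reached " + expectedRows + " rows.");
    }
}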