Use of com.datastax.oss.dsbulk.runner.ExitStatus.STATUS_OK in project dsbulk by DataStax.
Class PrometheusEndToEndSimulacronIT, method should_exchange_metrics_by_pull_and_push.
@Test
void should_exchange_metrics_by_pull_and_push() throws IOException, InterruptedException {
MockConnector.setDelegate(newConnectorDelegate());
SimulacronUtils.primeTables(
    simulacron,
    new Keyspace(
        "ks1",
        new Table(
            "table1", new Column("pk", TEXT), new Column("cc", TEXT), new Column("v", TEXT))));
String[] args = {
  "load",
  "-c",
  "mock",
  "--engine.executionId",
  "LOAD1",
  "--schema.keyspace",
  "ks1",
  "--schema.table",
  "table1",
  "--monitoring.console",
  "false",
  "--monitoring.prometheus.push.enabled",
  "true",
  "--monitoring.prometheus.pull.enabled",
  "true",
  "--monitoring.prometheus.labels",
  "{ foo = bar }",
  "--monitoring.prometheus.job",
  "job1",
  "--monitoring.prometheus.push.groupBy.instance",
  "true",
  "--monitoring.prometheus.push.url",
  "http://" + gateway.getHost() + ":" + gateway.getMappedPort(9091),
  "--monitoring.trackBytes",
  "true",
  "--driver.advanced.metrics.node.enabled",
  "pool.open-connections"
};
ExitStatus status = new DataStaxBulkLoader(addCommonSettings(args)).run();
assertStatus(status, STATUS_OK);
assertThat(logs).hasMessageContaining("Prometheus Metrics HTTP server listening on 0.0.0.0:8080");
// Give some time for Prometheus to scrape the gateway
Thread.sleep(7_000);
// assert that metrics were pulled directly from DSBulk
URL prometheusPullQuery =
    new URL(
        "http",
        "localhost",
        prometheus.getMappedPort(9090),
        "/api/v1/query?query=dsbulk_records_total{instance=\"host.testcontainers.internal:8080\"}[5m]");
assertThat(Resources.toString(prometheusPullQuery, StandardCharsets.UTF_8))
    .contains("\"status\":\"success\"")
    .contains("dsbulk_records_total")
    .contains("\"application_name\":\"DataStax Bulk Loader LOAD1\"")
    .contains("\"application_version\":\"" + WorkflowUtils.getBulkLoaderVersion() + "\"")
    .contains("\"client_id\":\"fc93e4ac-7fa5-394f-814e-21b735d04c10\"")
    .contains("\"driver_version\":\"" + Session.OSS_DRIVER_COORDINATES.getVersion() + "\"")
    .contains("\"instance\":\"host.testcontainers.internal:8080\"")
    .contains("\"job\":\"dsbulk\"")
    .contains("\"exported_job\":\"job1\"")
    .contains("\"foo\":\"bar\"")
    .contains("\"operation_id\":\"LOAD1\"");
URL prometheusDriverPullQuery =
    new URL(
        "http",
        "localhost",
        prometheus.getMappedPort(9090),
        "/api/v1/query?query=dsbulk_driver_nodes_pool_open_connections{instance=\"host.testcontainers.internal:8080\"}[5m]");
assertThat(Resources.toString(prometheusDriverPullQuery, StandardCharsets.UTF_8))
    .contains("\"status\":\"success\"")
    .contains("dsbulk_driver_nodes_pool_open_connections")
    .contains("\"node\":\"" + hostname + ":" + port + "\"")
    .contains("\"application_name\":\"DataStax Bulk Loader LOAD1\"")
    .contains("\"application_version\":\"" + WorkflowUtils.getBulkLoaderVersion() + "\"")
    .contains("\"client_id\":\"fc93e4ac-7fa5-394f-814e-21b735d04c10\"")
    .contains("\"driver_version\":\"" + Session.OSS_DRIVER_COORDINATES.getVersion() + "\"")
    .contains("\"instance\":\"host.testcontainers.internal:8080\"")
    .contains("\"job\":\"dsbulk\"")
    .contains("\"exported_job\":\"job1\"")
    .contains("\"foo\":\"bar\"")
    .contains("\"operation_id\":\"LOAD1\"");
// assert that metrics were pushed to the gateway
URL gatewayQuery = new URL("http", "localhost", gateway.getMappedPort(9091), "/metrics");
String labelsAndValue =
    "\\{"
        + "application_name=\"DataStax Bulk Loader LOAD1\","
        + "application_version=\"" + WorkflowUtils.getBulkLoaderVersion() + "\","
        + "client_id=\"fc93e4ac-7fa5-394f-814e-21b735d04c10\","
        + "driver_version=\"" + Session.OSS_DRIVER_COORDINATES.getVersion() + "\","
        + "foo=\"bar\","
        + "instance=\".+\","
        + "job=\"job1\","
        + "operation_id=\"LOAD1\"} .+";
assertThat(Resources.readLines(gatewayQuery, StandardCharsets.UTF_8))
    .anySatisfy(line -> assertThat(line).matches("dsbulk_success" + labelsAndValue))
    .anySatisfy(line -> assertThat(line).matches("dsbulk_last_success" + labelsAndValue))
    .anySatisfy(line -> assertThat(line).matches("dsbulk_records_total" + labelsAndValue))
    .allSatisfy(
        line -> assertThat(line).doesNotContain("dsbulk_driver_nodes_pool_open_connections"));
// assert that Prometheus scraped DSBulk metrics from the gateway
URL prometheusPullGatewayQuery =
    new URL(
        "http",
        "localhost",
        prometheus.getMappedPort(9090),
        "/api/v1/query?query=dsbulk_records_total{instance=\"gateway:9091\"}[5m]");
assertThat(Resources.toString(prometheusPullGatewayQuery, StandardCharsets.UTF_8))
    .contains("\"status\":\"success\"")
    .contains("dsbulk_records_total")
    .contains("\"application_name\":\"DataStax Bulk Loader LOAD1\"")
    .contains("\"application_version\":\"" + WorkflowUtils.getBulkLoaderVersion() + "\"")
    .contains("\"client_id\":\"fc93e4ac-7fa5-394f-814e-21b735d04c10\"")
    .contains("\"driver_version\":\"" + Session.OSS_DRIVER_COORDINATES.getVersion() + "\"")
    .contains("\"instance\":\"gateway:9091\"")
    .contains("\"job\":\"gateway\"")
    .contains("\"exported_job\":\"job1\"")
    .contains("\"foo\":\"bar\"")
    .contains("\"operation_id\":\"LOAD1\"");
// assert that driver metrics did not arrive to Prometheus through the gateway
URL prometheusDriverPullGatewayQuery =
    new URL(
        "http",
        "localhost",
        prometheus.getMappedPort(9090),
        "/api/v1/query?query=dsbulk_driver_nodes_pool_open_connections{instance=\"gateway:9091\"}[5m]");
assertThat(Resources.toString(prometheusDriverPullGatewayQuery, StandardCharsets.UTF_8))
    .isEqualTo("{\"status\":\"success\",\"data\":{\"resultType\":\"matrix\",\"result\":[]}}");
}
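Note that the fixed Thread.sleep(7_000) above only gives Prometheus time to complete a scrape cycle. A less flaky alternative would be to poll until the metric is actually queryable. This is a minimal sketch, not part of the original test, assuming the same prometheus container and Guava Resources helper as above, plus Awaitility (which this project already uses in other tests):
// Sketch: poll Prometheus until dsbulk_records_total is queryable instead of sleeping.
// `prometheus` and `Resources` are assumed to be the same objects as in the test above.
await()
    .atMost(ONE_MINUTE)
    .until(
        () -> {
          URL query =
              new URL(
                  "http",
                  "localhost",
                  prometheus.getMappedPort(9090),
                  "/api/v1/query?query=dsbulk_records_total");
          // The instant-query response only contains the metric name once data exists.
          return Resources.toString(query, StandardCharsets.UTF_8)
              .contains("dsbulk_records_total");
        });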
Use of com.datastax.oss.dsbulk.runner.ExitStatus.STATUS_OK in project dsbulk by DataStax.
Class SearchEndToEndCCMIT, method normal_unload_of_search_enabled_table.
/**
* Test for DAT-365: regular unload of a search-enabled table should not contain the solr_query
* column.
*/
@Test
void normal_unload_of_search_enabled_table() {
session.execute("CREATE TABLE IF NOT EXISTS test_search2 (pk int, cc int, v varchar, PRIMARY KEY (pk, cc))");
session.execute("CREATE SEARCH INDEX IF NOT EXISTS ON test_search2 WITH COLUMNS v { indexed:true };");
session.execute("INSERT INTO test_search2 (pk, cc, v) VALUES (0, 0, 'foo')");
session.execute("INSERT INTO test_search2 (pk, cc, v) VALUES (0, 1, 'bar')");
session.execute("INSERT INTO test_search2 (pk, cc, v) VALUES (0, 2, 'qix')");
// Wait until index is built
await()
    .atMost(ONE_MINUTE)
    .until(
        () ->
            !session
                .execute("SELECT v FROM test_search2 WHERE solr_query = '{\"q\": \"v:foo\"}'")
                .all()
                .isEmpty());
List<String> args = new ArrayList<>();
args.add("unload");
args.add("--connector.name");
args.add("mock");
args.add("--schema.keyspace");
args.add(session.getKeyspace().map(CqlIdentifier::asInternal).orElseThrow(IllegalStateException::new));
args.add("--schema.table");
args.add("test_search2");
ExitStatus status = new DataStaxBulkLoader(addCommonSettings(args)).run();
assertStatus(status, STATUS_OK);
assertThat(records)
    .hasSize(3)
    .satisfies(
        record -> {
          assertThat(record.fields()).hasSize(3);
          assertThat(record.getFieldValue(new DefaultMappedField("pk"))).isEqualTo("0");
          assertThat(record.getFieldValue(new DefaultMappedField("cc"))).isEqualTo("0");
          assertThat(record.getFieldValue(new DefaultMappedField("v"))).isEqualTo("foo");
        },
        Index.atIndex(0))
    .satisfies(
        record -> {
          assertThat(record.fields()).hasSize(3);
          assertThat(record.getFieldValue(new DefaultMappedField("pk"))).isEqualTo("0");
          assertThat(record.getFieldValue(new DefaultMappedField("cc"))).isEqualTo("1");
          assertThat(record.getFieldValue(new DefaultMappedField("v"))).isEqualTo("bar");
        },
        Index.atIndex(1))
    .satisfies(
        record -> {
          assertThat(record.fields()).hasSize(3);
          assertThat(record.getFieldValue(new DefaultMappedField("pk"))).isEqualTo("0");
          assertThat(record.getFieldValue(new DefaultMappedField("cc"))).isEqualTo("2");
          assertThat(record.getFieldValue(new DefaultMappedField("v"))).isEqualTo("qix");
        },
        Index.atIndex(2));
}
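Since DAT-365 is specifically about the synthetic solr_query column being excluded, a direct assertion of its absence could complement the positional checks above. A minimal sketch, assuming Record#getFieldValue returns null for a field the record does not contain:
// Sketch (hypothetical addition): no unloaded record should expose solr_query.
assertThat(records)
    .allSatisfy(
        record ->
            assertThat(record.getFieldValue(new DefaultMappedField("solr_query"))).isNull());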
Use of com.datastax.oss.dsbulk.runner.ExitStatus.STATUS_OK in project dsbulk by DataStax.
Class JsonConnectorEndToEndCCMIT, method full_load_query_warnings.
/**
 * Test for DAT-451: the number of query warnings logged by DSBulk should be capped by
 * log.maxQueryWarnings.
 */
@Test
void full_load_query_warnings() throws Exception {
assumeTrue(ccm.getCassandraVersion().compareTo(V3) >= 0, "Query warnings are only present in C* >= 3.0");
List<String> args = new ArrayList<>();
args.add("load");
args.add("--connector.name");
args.add("json");
args.add("--log.maxQueryWarnings");
args.add("1");
args.add("--connector.json.url");
args.add(StringUtils.quoteJson(JsonUtils.JSON_RECORDS));
args.add("--batch.mode");
args.add("REPLICA_SET");
args.add("--schema.keyspace");
args.add(session.getKeyspace().get().asInternal());
args.add("--schema.table");
args.add("ip_by_country");
ExitStatus status = new DataStaxBulkLoader(addCommonSettings(args)).run();
assertStatus(status, STATUS_OK);
validateResultSetSize(500, "SELECT * FROM ip_by_country");
validatePositionsFile(JsonUtils.JSON_RECORDS, 500);
/*
Expected warning; the exact text varies by server version.
C* 3.x:
  Unlogged batch covering N partitions detected against table [ks1.ip_by_country].
  You should use a logged batch for atomicity, or asynchronous writes for performance.
DSE 6.0+:
  Unlogged batch covering 20 partitions detected against table {ks1.ip_by_country}.
  You should use a logged batch for atomicity, or asynchronous writes for performance.
*/
assertThat(logs)
    .hasMessageContaining("Query generated server-side warning")
    .hasMessageMatching("Unlogged batch covering \\d+ partitions detected")
    .hasMessageContaining(session.getKeyspace().get().asCql(true) + ".ip_by_country")
    .hasMessageContaining(
        "The maximum number of logged query warnings has been exceeded (1); "
            + "subsequent warnings will not be logged.");
assertThat(stderr.getStreamLinesPlain())
    .anySatisfy(line -> assertThat(line).contains("Query generated server-side warning"))
    .anySatisfy(
        line ->
            assertThat(line).containsPattern("Unlogged batch covering \\d+ partitions detected"))
    .anySatisfy(
        line -> assertThat(line).contains(session.getKeyspace().get().asCql(true) + ".ip_by_country"))
    .anySatisfy(
        line ->
            assertThat(line)
                .contains(
                    "The maximum number of logged query warnings has been exceeded (1); "
                        + "subsequent warnings will not be logged."));
}
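For reference, the server-side warnings that DSBulk logs here are exposed by the Java driver through ExecutionInfo#getWarnings(). A minimal standalone sketch of reading them directly, independent of the test above; it assumes a reachable cluster on the default contact point and the ks1.ip_by_country table from the comments:
import com.datastax.oss.driver.api.core.CqlSession;
import com.datastax.oss.driver.api.core.cql.ResultSet;

// Sketch: how server-side query warnings surface in the Java driver 4.x API.
public class QueryWarningsDemo {
  public static void main(String[] args) {
    try (CqlSession session = CqlSession.builder().build()) {
      // Any statement may carry warnings; they ride on the result's ExecutionInfo.
      ResultSet rs = session.execute("SELECT * FROM ks1.ip_by_country");
      for (String warning : rs.getExecutionInfo().getWarnings()) {
        System.out.println("Server warning: " + warning);
      }
    }
  }
}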