Use of com.datastax.oss.dsbulk.runner.ExitStatus in project dsbulk by DataStax.
The class CSVConnectorEndToEndCCMIT, method full_load_unload_load_of_custom_types.
/**
 * Test for a custom type without an associated codec. Data should be inserted as a blob. To transform
 * a DynamicCompositeType value into a blob:
*
* <pre>{@code
* ByteBuffer foo = com.datastax.driver.core.TestUtils.serializeForDynamicCompositeType("foo",32);
* String blobHex = com.datastax.driver.core.utils.Bytes.toHexString(foo.array());
* }</pre>
*
 * <p>The resulting blobHex is then inserted into the c1 column of the custom_types_table table (see the
 * custom-type.csv file for the actual hex value).
*/
@Test
void full_load_unload_load_of_custom_types() throws Exception {
URL customTypesCsv = ClassLoader.getSystemResource("custom-type.csv");
session.execute("CREATE TABLE custom_types_table (k int PRIMARY KEY, c1 'DynamicCompositeType(s => UTF8Type, i => Int32Type)')");
List<String> args = new ArrayList<>();
args.add("load");
args.add("--connector.csv.url");
args.add(quoteJson(customTypesCsv));
args.add("--connector.csv.header");
args.add("false");
args.add("--schema.keyspace");
args.add(session.getKeyspace().get().asInternal());
args.add("--schema.table");
args.add("custom_types_table");
args.add("--schema.mapping");
args.add("k, c1");
ExitStatus status = new DataStaxBulkLoader(addCommonSettings(args)).run();
assertStatus(status, STATUS_OK);
validateResultSetSize(1, "SELECT * FROM custom_types_table");
FileUtils.deleteDirectory(logDir);
args = new ArrayList<>();
args.add("unload");
args.add("--connector.csv.url");
args.add(quoteJson(unloadDir));
args.add("--connector.csv.header");
args.add("false");
args.add("--connector.csv.maxConcurrentFiles");
args.add("1");
args.add("--schema.keyspace");
args.add(session.getKeyspace().get().asInternal());
args.add("--schema.table");
args.add("custom_types_table");
args.add("--schema.mapping");
args.add("k, c1");
status = new DataStaxBulkLoader(addCommonSettings(args)).run();
assertStatus(status, STATUS_OK);
validateOutputFiles(1, unloadDir);
args = new ArrayList<>();
args.add("load");
args.add("--connector.csv.url");
args.add(quoteJson(unloadDir));
args.add("--connector.csv.header");
args.add("false");
args.add("--schema.keyspace");
args.add(session.getKeyspace().get().asInternal());
args.add("--schema.table");
args.add("custom_types_table");
args.add("--schema.mapping");
args.add("k, c1");
status = new DataStaxBulkLoader(addCommonSettings(args)).run();
assertStatus(status, STATUS_OK);
validateResultSetSize(1, "SELECT * FROM custom_types_table");
}
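A hedged verification sketch (not part of the original test) of how the round-tripped blob could be checked against the hex value shipped in custom-type.csv, assuming the java-driver 4.x ByteUtils API; expectedHex is a placeholder, not the actual file content:

// Hedged sketch: read back the single c1 blob and compare its hex form with custom-type.csv.
String expectedHex = "0x..."; // placeholder: the actual hex value lives in custom-type.csv
Row row = session.execute("SELECT c1 FROM custom_types_table").one();
java.nio.ByteBuffer c1 = row.getByteBuffer("c1");
// ByteUtils.toHexString renders the buffer as a 0x-prefixed hex string,
// analogous to Bytes.toHexString in the 3.x driver mentioned in the Javadoc above.
String blobHex = com.datastax.oss.driver.api.core.data.ByteUtils.toHexString(c1);
assertThat(blobHex).isEqualTo(expectedHex);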
Use of com.datastax.oss.dsbulk.runner.ExitStatus in project dsbulk by DataStax.
The class CSVConnectorEndToEndCCMIT, method load_ttl_timestamp_now_in_query.
@Test
void load_ttl_timestamp_now_in_query() {
session.execute("DROP TABLE IF EXISTS table_ttl_timestamp");
session.execute("CREATE TABLE table_ttl_timestamp (key int PRIMARY KEY, value text, loaded_at timeuuid)");
List<String> args =
    Lists.newArrayList(
        "load",
        "--log.directory",
        quoteJson(logDir),
        "--connector.csv.ignoreLeadingWhitespaces",
        "true",
        "--connector.csv.ignoreTrailingWhitespaces",
        "true",
        "--connector.csv.url",
        ClassLoader.getSystemResource("ttl-timestamp.csv").toExternalForm(),
        "--schema.keyspace",
        session.getKeyspace().get().asInternal(),
        "--schema.query",
        "insert into table_ttl_timestamp (key, value, loaded_at) "
            + "values (:key, :value, now()) "
            + "using ttl :time_to_live and timestamp :created_at");
ExitStatus status = new DataStaxBulkLoader(addCommonSettings(args)).run();
assertStatus(status, STATUS_OK);
assertTTLAndTimestamp();
}
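assertTTLAndTimestamp() is not shown in this excerpt. A minimal sketch of the kind of check it could perform, assuming every row in ttl-timestamp.csv supplies a positive time_to_live and an explicit created_at:

// Hedged sketch, not the project's actual helper: verify TTL, write timestamp,
// and the now()-generated timeuuid for each loaded row.
List<Row> rows = session.execute("SELECT TTL(value), WRITETIME(value), loaded_at FROM table_ttl_timestamp").all();
assertThat(rows).isNotEmpty();
for (Row row : rows) {
  assertThat(row.getInt(0)).isGreaterThan(0); // "using ttl :time_to_live" was applied
  assertThat(row.getLong(1)).isNotZero(); // "timestamp :created_at" was applied
  assertThat(row.getUuid("loaded_at")).isNotNull(); // loaded_at was filled by now()
}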
Use of com.datastax.oss.dsbulk.runner.ExitStatus in project dsbulk by DataStax.
The class CSVConnectorEndToEndCCMIT, method should_respect_mapping_variables_order.
/**
* Test for DAT-253.
*/
@Test
void should_respect_mapping_variables_order() throws Exception {
session.execute("DROP TABLE IF EXISTS mapping");
session.execute("CREATE TABLE IF NOT EXISTS mapping (key int PRIMARY KEY, value varchar)");
List<String> args = new ArrayList<>();
args.add("load");
args.add("--connector.csv.url");
args.add(ClassLoader.getSystemResource("invalid-mapping.csv").toExternalForm());
args.add("--connector.csv.header");
args.add("false");
args.add("--schema.keyspace");
args.add(session.getKeyspace().get().asInternal());
args.add("--schema.table");
args.add("mapping");
args.add("--schema.mapping");
args.add("value,key");
ExitStatus loadStatus = new DataStaxBulkLoader(addCommonSettings(args)).run();
assertStatus(loadStatus, STATUS_COMPLETED_WITH_ERRORS);
assertThat(logs).hasMessageContaining("At least 1 record does not match the provided schema.mapping or schema.query");
FileUtils.deleteDirectory(logDir);
args = new ArrayList<>();
args.add("unload");
args.add("--connector.csv.url");
args.add(quoteJson(unloadDir));
args.add("--connector.csv.header");
args.add("false");
args.add("--connector.csv.maxConcurrentFiles");
args.add("1");
args.add("--schema.keyspace");
args.add(session.getKeyspace().get().asInternal());
args.add("--schema.table");
args.add("mapping");
args.add("--schema.mapping");
// note that the entries are not in proper order,
// the export should still order fields by index, so 'key,value' and not 'value,key'
args.add("1=value,0=key");
ExitStatus unloadStatus = new DataStaxBulkLoader(addCommonSettings(args)).run();
assertStatus(unloadStatus, STATUS_OK);
List<String> lines = FileUtils.readAllLinesInDirectoryAsStream(unloadDir).collect(Collectors.toList());
assertThat(lines).hasSize(2).contains("1,ok1").contains("2,ok2");
}
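A hedged follow-up check (not in the original test): the two valid records should have landed in the table with the columns correctly swapped by the "value,key" load mapping, matching the "1,ok1" and "2,ok2" lines produced by the unload:

List<Row> rows = session.execute("SELECT key, value FROM mapping").all();
assertThat(rows).hasSize(2);
for (Row row : rows) {
  // key 1 -> "ok1", key 2 -> "ok2"
  assertThat(row.getString("value")).isEqualTo("ok" + row.getInt("key"));
}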
Use of com.datastax.oss.dsbulk.runner.ExitStatus in project dsbulk by DataStax.
The class CSVConnectorEndToEndCCMIT, method function_mapped_to_primary_key_with_custom_query_and_positional_variables.
/**
* Test for DAT-326.
*/
@Test
void function_mapped_to_primary_key_with_custom_query_and_positional_variables() {
session.execute("DROP TABLE IF EXISTS dat326c");
session.execute("CREATE TABLE IF NOT EXISTS dat326c (pk int, cc timeuuid, v int, PRIMARY KEY (pk, cc))");
List<String> args =
    Lists.newArrayList(
        "load",
        "--log.directory",
        quoteJson(logDir),
        "-header",
        "true",
        "--connector.csv.url",
        quoteJson(getClass().getResource("/function-pk.csv")),
        "--schema.keyspace",
        session.getKeyspace().get().asInternal(),
        "--schema.query",
        "INSERT INTO dat326c (pk, cc, v) VALUES (?, now(), ?)");
ExitStatus status = new DataStaxBulkLoader(addCommonSettings(args)).run();
assertStatus(status, STATUS_OK);
}
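The test only asserts the exit status. A hedged additional check (assuming function-pk.csv contains at least one valid row) would be to confirm that now() generated the clustering value for every row inserted through the positional query:

List<Row> rows = session.execute("SELECT pk, cc, v FROM dat326c").all();
assertThat(rows).isNotEmpty();
for (Row row : rows) {
  assertThat(row.getUuid("cc")).isNotNull(); // cc was generated server-side by now()
}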
Use of com.datastax.oss.dsbulk.runner.ExitStatus in project dsbulk by DataStax.
The class CSVConnectorEndToEndCCMIT, method cas_load_with_errors.
/**
* Test for CAS failures (DAT-384).
*/
@Test
void cas_load_with_errors() {
session.execute("DROP TABLE IF EXISTS test_cas");
session.execute("CREATE TABLE test_cas (pk int, cc int, v int, PRIMARY KEY (pk, cc))");
session.execute("INSERT INTO test_cas (pk, cc, v) VALUES (1, 1, 1)");
session.execute("INSERT INTO test_cas (pk, cc, v) VALUES (1, 2, 2)");
session.execute("INSERT INTO test_cas (pk, cc, v) VALUES (1, 3, 3)");
// two failing CAS records will cause the entire batch to fail
Record record1Failed = RecordUtils.mappedCSV("pk", "1", "cc", "1", "v", "1"); // will fail
Record record2Failed = RecordUtils.mappedCSV("pk", "1", "cc", "2", "v", "2"); // will fail
Record record3NotApplied =
    RecordUtils.mappedCSV("pk", "1", "cc", "4", "v", "4"); // will not be applied
MockConnector.mockReads(record1Failed, record2Failed, record3NotApplied);
List<String> args = new ArrayList<>();
args.add("load");
args.add("--connector.name");
args.add("mock");
args.add("--schema.keyspace");
args.add(session.getKeyspace().get().asInternal());
args.add("--schema.query");
args.add("INSERT INTO test_cas (pk, cc, v) VALUES (:pk, :cc, :v) IF NOT EXISTS");
ExitStatus status = new DataStaxBulkLoader(addCommonSettings(args)).run();
assertStatus(status, STATUS_COMPLETED_WITH_ERRORS);
Path bad = OperationDirectory.getCurrentOperationDirectory().map(dir -> dir.resolve("paxos.bad")).orElse(null);
assertThat(bad).exists();
assertThat(FileUtils.readAllLines(bad)).containsExactly(record1Failed.getSource().toString(), record2Failed.getSource().toString(), record3NotApplied.getSource().toString());
Path errors = OperationDirectory.getCurrentOperationDirectory().map(dir -> dir.resolve("paxos-errors.log")).orElse(null);
assertThat(errors).exists();
assertThat(FileUtils.readAllLines(errors).collect(Collectors.joining("\n")))
    .contains(
        String.format(
            "Resource: %s\n"
                + " Position: %d\n"
                + " Source: %s\n"
                + " INSERT INTO test_cas (pk, cc, v) VALUES (:pk, :cc, :v) IF NOT EXISTS\n"
                + " pk: 1\n"
                + " cc: 1\n"
                + " v: 1",
            record1Failed.getResource(), record1Failed.getPosition(), record1Failed.getSource()),
        String.format(
            "Resource: %s\n"
                + " Position: %d\n"
                + " Source: %s\n"
                + " INSERT INTO test_cas (pk, cc, v) VALUES (:pk, :cc, :v) IF NOT EXISTS\n"
                + " pk: 1\n"
                + " cc: 2\n"
                + " v: 2",
            record2Failed.getResource(), record2Failed.getPosition(), record2Failed.getSource()),
        String.format(
            "Resource: %s\n"
                + " Position: %d\n"
                + " Source: %s\n"
                + " INSERT INTO test_cas (pk, cc, v) VALUES (:pk, :cc, :v) IF NOT EXISTS\n"
                + " pk: 1\n"
                + " cc: 4\n"
                + " v: 4",
            record3NotApplied.getResource(), record3NotApplied.getPosition(), record3NotApplied.getSource()),
        "Failed conditional updates:",
        "\"[applied]\": false\npk: 1\ncc: 1\nv: 1",
        "\"[applied]\": false\npk: 1\ncc: 2\nv: 2");
List<Row> rows = session.execute("SELECT v FROM test_cas WHERE pk = 1").all();
assertThat(rows).hasSize(3);
assertThat(rows.get(0).getInt(0)).isEqualTo(1);
assertThat(rows.get(1).getInt(0)).isEqualTo(2);
assertThat(rows.get(2).getInt(0)).isEqualTo(3);
}
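The entries written to paxos.bad and paxos-errors.log follow standard LWT semantics. A minimal standalone sketch (not part of the test) of why records 1 and 2 fail: a conditional insert against an existing primary key is not applied, and the result row carries [applied] plus the pre-existing values:

ResultSet rs = session.execute("INSERT INTO test_cas (pk, cc, v) VALUES (1, 1, 9) IF NOT EXISTS");
assertThat(rs.wasApplied()).isFalse(); // the row (1, 1, 1) already exists
Row existing = rs.one();
assertThat(existing.getBoolean("[applied]")).isFalse();
assertThat(existing.getInt("v")).isEqualTo(1); // the pre-existing value is returned, not 9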