Use of com.datastax.oss.dsbulk.connectors.api.DefaultIndexedField in project dsbulk by datastax.
The class SchemaSettingsTest, method assertMapping:
private static void assertMapping(DefaultMapping mapping, Object... fieldsAndVars) {
  ImmutableSetMultimap.Builder<Object, Object> expected = ImmutableSetMultimap.builder();
  for (int i = 0; i < fieldsAndVars.length; i += 2) {
    String first =
        fieldsAndVars[i] instanceof String
            ? (String) fieldsAndVars[i]
            : ((CqlIdentifier) fieldsAndVars[i]).asInternal();
    CQLWord second =
        fieldsAndVars[i + 1] instanceof String
            ? CQLWord.fromInternal((String) fieldsAndVars[i + 1])
            : CQLWord.fromCqlIdentifier((CqlIdentifier) fieldsAndVars[i + 1]);
    if (CharMatcher.inRange('0', '9').matchesAllOf(first)) {
      expected.put(new DefaultIndexedField(Integer.parseInt(first)), second);
    } else {
      expected.put(new DefaultMappedField(first), second);
    }
  }
  @SuppressWarnings("unchecked")
  SetMultimap<Field, CQLWord> fieldsToVariables =
      (SetMultimap<Field, CQLWord>) getInternalState(mapping, "fieldsToVariables");
  assertThat(fieldsToVariables).isEqualTo(expected.build());
}
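For reference, a minimal sketch of how this helper could be called from a test: field keys that are purely numeric strings are turned into DefaultIndexedField instances, anything else into DefaultMappedField instances. The mapping object and the column names below are hypothetical.

// "0" and "1" become DefaultIndexedField(0) and DefaultIndexedField(1), while "fieldA"
// becomes DefaultMappedField("fieldA"); "col1", "col2" and "col3" stand in for CQL column names.
assertMapping(mapping, "0", "col1", "1", "col2", "fieldA", "col3");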
Use of com.datastax.oss.dsbulk.connectors.api.DefaultIndexedField in project dsbulk by datastax.
The class CSVConnectorTest, method should_honor_ignoreLeadingWhitespaces_and_ignoreTrailingWhitespaces_when_reading:
@Test
void should_honor_ignoreLeadingWhitespaces_and_ignoreTrailingWhitespaces_when_reading()
    throws Exception {
  Path file = Files.createTempFile("test", ".csv");
  Files.write(file, Collections.singleton(" foo "));
  CSVConnector connector = new CSVConnector();
  Config settings =
      TestConfigUtils.createTestConfig(
          "dsbulk.connector.csv",
          "url", StringUtils.quoteJson(file),
          "ignoreLeadingWhitespaces", false,
          "ignoreTrailingWhitespaces", false,
          "header", false);
  connector.configure(settings, true, true);
  connector.init();
  List<Record> records = Flux.merge(connector.read()).collectList().block();
  assertThat(records).hasSize(1);
  assertThat(records.get(0).getFieldValue(new DefaultIndexedField(0))).isEqualTo(" foo ");
  connector.close();
}
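For contrast, a sketch (not a test taken from the project) of the same read with both flags switched to true; based on the flags' names, the surrounding whitespace would be expected to be trimmed away:

CSVConnector trimmingConnector = new CSVConnector();
Config trimming =
    TestConfigUtils.createTestConfig(
        "dsbulk.connector.csv",
        "url", StringUtils.quoteJson(file),
        "ignoreLeadingWhitespaces", true,
        "ignoreTrailingWhitespaces", true,
        "header", false);
trimmingConnector.configure(trimming, true, true);
trimmingConnector.init();
List<Record> trimmed = Flux.merge(trimmingConnector.read()).collectList().block();
// With both flags enabled, the leading and trailing spaces should be stripped.
assertThat(trimmed.get(0).getFieldValue(new DefaultIndexedField(0))).isEqualTo("foo");
trimmingConnector.close();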
Use of com.datastax.oss.dsbulk.connectors.api.DefaultIndexedField in project dsbulk by datastax.
The class CSVConnectorTest, method should_honor_multi_char_delimiter:
@Test
void should_honor_multi_char_delimiter() throws Exception {
  CSVConnector connector = new CSVConnector();
  Config settings =
      TestConfigUtils.createTestConfig(
          "dsbulk.connector.csv",
          "url", url("/multi-char-delimiter.csv"),
          "delimiter", "\"||\"",
          "ignoreLeadingWhitespaces", true,
          "ignoreTrailingWhitespaces", true,
          "header", true);
  connector.configure(settings, true, true);
  connector.init();
  List<Record> records = Flux.merge(connector.read()).collectList().block();
  assertThat(records).hasSize(1);
  Record record = records.get(0);
  assertThat(record.fields()).hasSize(6);
  assertThat(record.getFieldValue(new DefaultIndexedField(0))).isEqualTo("foo");
  assertThat(record.getFieldValue(new DefaultIndexedField(1))).isEqualTo("|bar|");
  assertThat(record.getFieldValue(new DefaultIndexedField(2))).isEqualTo("foo||bar");
  assertThat(record.getFieldValue(new DefaultMappedField("field A"))).isEqualTo("foo");
  assertThat(record.getFieldValue(new DefaultMappedField("field B"))).isEqualTo("|bar|");
  assertThat(record.getFieldValue(new DefaultMappedField("field C"))).isEqualTo("foo||bar");
  connector.close();
}
Use of com.datastax.oss.dsbulk.connectors.api.DefaultIndexedField in project dsbulk by datastax.
The class RecordUtils, method indexedCSV:
public static Record indexedCSV(String... values) {
  int counter = COUNTER.incrementAndGet();
  DefaultRecord record =
      DefaultRecord.indexed(
          "source" + counter, URI.create("file://file" + counter + ".csv"), counter - 1);
  for (int i = 0; i < values.length; i++) {
    record.put(new DefaultIndexedField(i), values[i]);
  }
  return record;
}
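A brief usage sketch (the values are illustrative): each argument ends up under its positional DefaultIndexedField.

Record record = RecordUtils.indexedCSV("foo", "bar");
// Values are addressable by position only.
assertThat(record.getFieldValue(new DefaultIndexedField(0))).isEqualTo("foo");
assertThat(record.getFieldValue(new DefaultIndexedField(1))).isEqualTo("bar");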
Use of com.datastax.oss.dsbulk.connectors.api.DefaultIndexedField in project dsbulk by datastax.
The class RecordUtils, method mappedCSV:
public static Record mappedCSV(String... tokens) {
  int counter = COUNTER.incrementAndGet();
  DefaultRecord record =
      DefaultRecord.indexed(
          "source" + counter, URI.create("file://file" + counter + ".csv"), counter - 1);
  for (int i = 0; i < tokens.length; i += 2) {
    record.put(new DefaultMappedField(tokens[i]), tokens[i + 1]);
    // tokens come in (name, value) pairs, so the value of pair i / 2 is also stored
    // under that positional index.
    record.put(new DefaultIndexedField(i / 2), tokens[i + 1]);
  }
  return record;
}
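A brief usage sketch (names and values are illustrative): tokens are consumed as (field name, value) pairs, and each value is also exposed under its positional index.

Record record = RecordUtils.mappedCSV("field A", "foo", "field B", "bar");
// Each value can be fetched both by name and by position.
assertThat(record.getFieldValue(new DefaultMappedField("field A"))).isEqualTo("foo");
assertThat(record.getFieldValue(new DefaultIndexedField(0))).isEqualTo("foo");
assertThat(record.getFieldValue(new DefaultMappedField("field B"))).isEqualTo("bar");
assertThat(record.getFieldValue(new DefaultIndexedField(1))).isEqualTo("bar");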