Use of com.datastax.oss.dsbulk.connectors.api.Record in project dsbulk by datastax.
The class CSVEndToEndSimulacronIT, method unload_write_error.
@Test
void unload_write_error() {
  Path file1 = unloadDir.resolve("output-000001.csv");
  Path file2 = unloadDir.resolve("output-000002.csv");
  Path file3 = unloadDir.resolve("output-000003.csv");
  Path file4 = unloadDir.resolve("output-000004.csv");
  MockConnector.setDelegate(
      new CSVConnector() {
        @Override
        public void configure(
            @NonNull Config settings, boolean read, boolean retainRecordSources) {
          settings =
              ConfigFactory.parseString(
                      "url = " + quoteJson(unloadDir) + ", header = false, maxConcurrentFiles = 4")
                  .withFallback(
                      ConfigUtils.createReferenceConfig().getConfig("dsbulk.connector.csv"));
          super.configure(settings, read, retainRecordSources);
        }

        @NonNull
        @Override
        public Function<Publisher<Record>, Publisher<Record>> write() {
          // will cause the write workers to fail because the files already exist
          try {
            Files.createFile(file1);
            Files.createFile(file2);
            Files.createFile(file3);
            Files.createFile(file4);
          } catch (IOException e) {
            throw new UncheckedIOException(e);
          }
          return super.write();
        }
      });
  primeIpByCountryTable(simulacron);
  RequestPrime prime = createQueryWithResultSet(SELECT_FROM_IP_BY_COUNTRY, 10);
  simulacron.prime(new Prime(prime));
  String[] args = {
    "unload",
    "--connector.name", "mock",
    "--schema.keyspace", "ks1",
    "--schema.query", SELECT_FROM_IP_BY_COUNTRY,
    "--schema.mapping", IP_BY_COUNTRY_MAPPING_INDEXED
  };
  ExitStatus status = new DataStaxBulkLoader(addCommonSettings(args)).run();
  assertStatus(status, STATUS_ABORTED_FATAL_ERROR);
  assertThat(stdErr.getStreamAsString()).contains("failed").containsPattern("output-00000[1-4].csv");
  assertThat(logs.getAllMessagesAsString()).contains("failed").containsPattern("output-00000[1-4].csv");
}
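The failure injection in this test relies on standard NIO semantics: Files.createFile throws FileAlreadyExistsException when the target already exists, so pre-creating the four output files presumably makes the connector's write workers fail as soon as they try to create them. Below is a minimal standalone sketch of that mechanism; the class name and paths are hypothetical and not part of the dsbulk sources.

import java.io.IOException;
import java.nio.file.FileAlreadyExistsException;
import java.nio.file.Files;
import java.nio.file.Path;

public class CreateFileSketch {
  public static void main(String[] args) throws IOException {
    Path dir = Files.createTempDirectory("unload");
    Path out = dir.resolve("output-000001.csv");
    Files.createFile(out); // first creation succeeds
    try {
      Files.createFile(out); // second creation fails: the file already exists
    } catch (FileAlreadyExistsException e) {
      // this is the kind of error a write worker would surface
      System.out.println("write would fail here: " + e);
    }
  }
}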
Use of com.datastax.oss.dsbulk.connectors.api.Record in project dsbulk by datastax.
The class MockConnector, method mockWrites.
/**
* Sets up the mock connector to emulate writes; it will store all received records as if they
* were written to an external sink. The "written" records will appear in the returned list.
*
* @return the list where "written" records will be stored.
*/
public static List<Record> mockWrites() {
  List<Record> records = new ArrayList<>();
  setDelegate(
      new Connector() {

        @Override
        public void init() {}

        @Override
        public void configure(
            @NonNull Config settings, boolean read, boolean retainRecordSources) {}

        @Override
        public int readConcurrency() {
          // not used: this delegate only emulates writes
          return -1;
        }

        @Override
        public int writeConcurrency() {
          return 1;
        }

        @Override
        public boolean supports(@NonNull ConnectorFeature feature) {
          return true;
        }

        @NonNull
        @Override
        public RecordMetadata getRecordMetadata() {
          return (field, cql) -> GenericType.STRING;
        }

        @NonNull
        @Override
        public Publisher<Publisher<Record>> read() {
          return Flux::just;
        }

        @NonNull
        @Override
        public Function<Publisher<Record>, Publisher<Record>> write() {
          // pass records through unchanged, capturing each one in the list
          return upstream -> Flux.from(upstream).doOnNext(records::add);
        }
      });
  return records;
}
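A typical use of mockWrites() in a test might look like the sketch below. This is hypothetical, not a test from the dsbulk sources: it assumes a Simulacron harness like the ITs above (primeIpByCountryTable, addCommonSettings, assertStatus and the various constants come from that harness), and it assumes STATUS_OK is the exit status of a clean run. The expected count of 10 matches the primed result set.

@Test
void unload_captures_written_records() {
  List<Record> written = MockConnector.mockWrites();
  primeIpByCountryTable(simulacron);
  simulacron.prime(new Prime(createQueryWithResultSet(SELECT_FROM_IP_BY_COUNTRY, 10)));
  String[] args = {
    "unload",
    "--connector.name", "mock",
    "--schema.keyspace", "ks1",
    "--schema.query", SELECT_FROM_IP_BY_COUNTRY,
    "--schema.mapping", IP_BY_COUNTRY_MAPPING_INDEXED
  };
  ExitStatus status = new DataStaxBulkLoader(addCommonSettings(args)).run();
  assertStatus(status, STATUS_OK);
  // all 10 rows returned by the primed query end up in the list
  assertThat(written).hasSize(10);
}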
Use of com.datastax.oss.dsbulk.connectors.api.Record in project dsbulk by datastax.
The class MockConnector, method mockCountingWrites.
/**
* Sets up the mock connector to emulate writes; it will acknowledge records as if they were
* written to an external sink. The "written" records will be counted and the total number of
* records "written" will be reflected in the returned AtomicInteger.
*
* @return a counter for the number of records "written".
*/
public static AtomicInteger mockCountingWrites() {
  AtomicInteger records = new AtomicInteger();
  setDelegate(
      new Connector() {

        @Override
        public void init() {}

        @Override
        public void configure(
            @NonNull Config settings, boolean read, boolean retainRecordSources) {}

        @Override
        public int readConcurrency() {
          // not used: this delegate only emulates writes
          return -1;
        }

        @Override
        public int writeConcurrency() {
          return 1;
        }

        @Override
        public boolean supports(@NonNull ConnectorFeature feature) {
          return true;
        }

        @NonNull
        @Override
        public RecordMetadata getRecordMetadata() {
          return (field, cql) -> GenericType.STRING;
        }

        @NonNull
        @Override
        public Publisher<Publisher<Record>> read() {
          return Flux::just;
        }

        @NonNull
        @Override
        public Function<Publisher<Record>, Publisher<Record>> write() {
          // pass records through unchanged, incrementing the counter for each one
          return upstream -> Flux.from(upstream).doOnNext(r -> records.incrementAndGet());
        }
      });
  return records;
}
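mockCountingWrites() is the lighter-weight variant of mockWrites(): the test shape stays the same, but no records are retained in memory, which suits large unloads where only the total matters. A sketch, under the same harness assumptions as the hypothetical test above:

AtomicInteger counter = MockConnector.mockCountingWrites();
// ... run an unload against --connector.name mock, as in the sketch above ...
assertThat(counter.get()).isEqualTo(10); // one increment per record "written"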
Use of com.datastax.oss.dsbulk.connectors.api.Record in project dsbulk by datastax.
The class JsonEndToEndSimulacronIT, method unload_write_error.
@Test
void unload_write_error() {
  Path file1 = unloadDir.resolve("output-000001.json");
  Path file2 = unloadDir.resolve("output-000002.json");
  Path file3 = unloadDir.resolve("output-000003.json");
  Path file4 = unloadDir.resolve("output-000004.json");
  MockConnector.setDelegate(
      new JsonConnector() {
        @Override
        public void configure(
            @NonNull Config settings, boolean read, boolean retainRecordSources) {
          settings =
              ConfigFactory.parseString(
                      "url = " + StringUtils.quoteJson(unloadDir) + ", maxConcurrentFiles = 4")
                  .withFallback(
                      ConfigUtils.createReferenceConfig().getConfig("dsbulk.connector.json"));
          super.configure(settings, read, retainRecordSources);
        }

        @NonNull
        @Override
        public Function<Publisher<Record>, Publisher<Record>> write() {
          // will cause the write workers to fail because the files already exist
          try {
            Files.createFile(file1);
            Files.createFile(file2);
            Files.createFile(file3);
            Files.createFile(file4);
          } catch (IOException e) {
            throw new UncheckedIOException(e);
          }
          return super.write();
        }

        @Override
        public boolean supports(@NonNull ConnectorFeature feature) {
          return true;
        }
      });
  primeIpByCountryTable(simulacron);
  RequestPrime prime = createQueryWithResultSet(SELECT_FROM_IP_BY_COUNTRY, 10);
  simulacron.prime(new Prime(prime));
  String[] args = {
    "unload",
    "--connector.name", "mock",
    "--schema.keyspace", "ks1",
    "--schema.query", SELECT_FROM_IP_BY_COUNTRY,
    "--schema.mapping", IP_BY_COUNTRY_MAPPING_NAMED
  };
  ExitStatus status = new DataStaxBulkLoader(addCommonSettings(args)).run();
  assertStatus(status, STATUS_ABORTED_FATAL_ERROR);
  assertThat(stdErr.getStreamAsString()).contains("failed").containsPattern("output-00000[1-4].json");
  assertThat(logs.getAllMessagesAsString()).contains("failed").containsPattern("output-00000[1-4].json");
}
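Both configure overrides above use the same Typesafe Config idiom: settings parsed from a string take precedence, and any key not set there falls back to the connector's reference defaults. A minimal standalone sketch of that merge follows; the keys and values are illustrative stand-ins, not the connectors' real defaults.

import com.typesafe.config.Config;
import com.typesafe.config.ConfigFactory;

public class FallbackSketch {
  public static void main(String[] args) {
    // stand-in for ConfigUtils.createReferenceConfig().getConfig("dsbulk.connector.csv")
    Config reference = ConfigFactory.parseString("header = true, maxConcurrentFiles = 1");
    Config overrides = ConfigFactory.parseString("maxConcurrentFiles = 4");
    Config merged = overrides.withFallback(reference);
    System.out.println(merged.getBoolean("header"));         // true, from the fallback
    System.out.println(merged.getInt("maxConcurrentFiles")); // 4, from the override
  }
}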