Use of com.datastax.oss.dsbulk.workflow.commons.schema.ReadResultMapper in project dsbulk by DataStax.
From the class SchemaSettingsTest, the method should_create_row_mapper_when_keyspace_and_table_provided:
@Test
void should_create_row_mapper_when_keyspace_and_table_provided() {
  Config config =
      TestConfigUtils.createTestConfig("dsbulk.schema", "keyspace", "ks", "table", "t1");
  SchemaSettings settings = new SchemaSettings(config, READ_AND_MAP);
  settings.init(session, codecFactory, false, true);
  ReadResultMapper mapper =
      settings.createReadResultMapper(session, recordMetadata, codecFactory, true);
  assertThat(mapper).isNotNull();
  ArgumentCaptor<String> argument = ArgumentCaptor.forClass(String.class);
  verify(session).prepare(argument.capture());
  assertThat(argument.getValue())
      .isEqualTo(
          "SELECT c1, \"COL 2\", c3 FROM ks.t1 WHERE token(c1) > :start AND token(c1) <= :end");
  assertMapping(mapper, C1, C1, C2, C2, C3, C3);
}
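For context, the mapper created above converts each ReadResult read from Cassandra into a connector Record; the UnloadWorkflow example below applies it via readResultMapper::map. A minimal standalone sketch of that use follows (the helper class is illustrative, and the import locations for Record and ReadResult are assumptions based on the dsbulk 1.x module layout, not shown in these examples):

import com.datastax.oss.dsbulk.connectors.api.Record;
import com.datastax.oss.dsbulk.executor.api.result.ReadResult;
import com.datastax.oss.dsbulk.workflow.commons.schema.ReadResultMapper;
import org.reactivestreams.Publisher;
import reactor.core.publisher.Flux;

// Illustrative sketch: adapt a stream of ReadResults into Records using a
// ReadResultMapper. The import paths for Record and ReadResult are
// assumptions; only mapper.map(ReadResult) -> Record is shown by the
// surrounding examples.
public final class ReadResultMapperSketch {
  static Flux<Record> toRecords(Publisher<ReadResult> results, ReadResultMapper mapper) {
    return Flux.from(results).map(mapper::map);
  }
}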
From the class UnloadWorkflow, the method oneWriter:
private Flux<Record> oneWriter() {
  int numThreads = Math.min(numCores * 2, readConcurrency);
  Scheduler scheduler =
      numThreads == 1
          ? Schedulers.immediate()
          : Schedulers.newParallel(numThreads, new DefaultThreadFactory("workflow"));
  schedulers.add(scheduler);
  return Flux.fromIterable(readStatements)
      .flatMap(
          results ->
              Flux.from(executor.readReactive(results))
                  .publishOn(scheduler, 500)
                  .transform(queryWarningsHandler)
                  .transform(totalItemsMonitor)
                  .transform(totalItemsCounter)
                  .transform(failedReadResultsMonitor)
                  .transform(failedReadsHandler)
                  .map(readResultMapper::map)
                  .transform(failedRecordsMonitor)
                  .transform(unmappableRecordsHandler),
          readConcurrency,
          500)
      .transform(writer)
      .transform(failedRecordsMonitor)
      .transform(failedRecordsHandler);
}
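The scheduler selection at the top of oneWriter is a deliberate trade-off: the thread count is capped at min(numCores * 2, readConcurrency), and when a single thread suffices the pipeline stays on the calling thread via Schedulers.immediate() rather than spinning up a pool. A minimal sketch of just that selection (the helper class and method name are illustrative; DefaultThreadFactory is assumed to be Netty's io.netty.util.concurrent.DefaultThreadFactory):

import io.netty.util.concurrent.DefaultThreadFactory;
import reactor.core.scheduler.Scheduler;
import reactor.core.scheduler.Schedulers;

final class SchedulerSketch {
  // Illustrative helper mirroring the selection in oneWriter(): a single
  // reader stays on the calling thread with the no-op immediate scheduler;
  // otherwise a dedicated parallel pool named "workflow" is created.
  static Scheduler readScheduler(int numCores, int readConcurrency) {
    int numThreads = Math.min(numCores * 2, readConcurrency);
    return numThreads == 1
        ? Schedulers.immediate()
        : Schedulers.newParallel(numThreads, new DefaultThreadFactory("workflow"));
  }
}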